focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns the pasteboard associated with the given host, creating it lazily
 * on first access. Returns null for a null host.
 */
public static PathPasteboard getPasteboard(final Host bookmark) {
    if(null == bookmark) {
        return null;
    }
    // computeIfAbsent replaces the containsKey/put/get sequence: one lookup
    // instead of three, and no check-then-act gap between test and insert.
    return pasteboards.computeIfAbsent(bookmark, PathPasteboard::new);
}
// Verifies the factory caches per-host pasteboards: repeated lookups for the
// same Host return the very same (==) instance.
@Test public void testGetPasteboard() { final Host s = new Host(new TestProtocol(Scheme.ftp), "l"); final PathPasteboard pasteboard = PathPasteboardFactory.getPasteboard(s); assertNotNull(pasteboard); assertEquals(pasteboard, PathPasteboardFactory.getPasteboard(s)); assertSame(pasteboard, PathPasteboardFactory.getPasteboard(s)); }
/**
 * Object-keyed lookup: Integer keys are routed to the int-based overload,
 * every other key type falls through to the superclass implementation.
 */
@Override
public R get(Object key) {
    if (!(key instanceof Integer)) {
        return super.get(key);
    }
    return get(((Integer) key).intValue());
}
// Exercises the Integer-key overload: odd keys present in `a` resolve to
// entries, even keys and all lookups against `b` return null.
@Test public void lookup() { assertNull(a.get(0)); a.get(1).asserts(1); assertNull(a.get(2)); a.get(3).asserts(3); assertNull(a.get(4)); a.get(5).asserts(5); assertNull(a.get(6)); assertNull(b.get(1)); assertNull(b.get(3)); assertNull(b.get(5)); }
/**
 * Maps a digit ('1'-'9') or ASCII letter to its enclosed ("circled")
 * Unicode counterpart; any other character is returned unchanged.
 */
public static char toCloseChar(char c) {
    if (c >= '1' && c <= '9') {
        return (char) ('①' + (c - '1'));
    }
    if (c >= 'A' && c <= 'Z') {
        return (char) ('Ⓐ' + (c - 'A'));
    }
    if (c >= 'a' && c <= 'z') {
        return (char) ('ⓐ' + (c - 'a'));
    }
    return c;
}
// Spot-checks one digit, one upper-case and one lower-case mapping to the
// enclosed-character range.
@Test public void toCloseCharTest(){ assertEquals('②', CharUtil.toCloseChar('2')); assertEquals('Ⓜ', CharUtil.toCloseChar('M')); assertEquals('ⓡ', CharUtil.toCloseChar('r')); }
// Plain setter for the connection identifier.
public void setConnectionId(String connectionId) { this.connectionId = connectionId; }
// Connection id is null by default and round-trips through the setter.
@Test void testSetConnectionId() { assertNull(connection.getConnectionId()); connection.setConnectionId("testConnectionId"); assertEquals("testConnectionId", connection.getConnectionId()); }
/**
 * Reads lines from the terminal, consuming any that are handled as
 * CLI-specific commands, and returns the first line that is not.
 */
public String nextNonCliCommand() {
    while (true) {
        final String line = terminal.readLine();
        if (!maybeHandleCliSpecificCommands(line)) {
            return line;
        }
    }
}
// Tab and other whitespace must be accepted as separators between a CLI
// command name and its (possibly quoted) arguments.
@Test public void shouldSupportOtherWhitespaceBetweenCliCommandAndArgs() { // Given: when(lineSupplier.get()) .thenReturn(CLI_CMD_NAME + "\tArg0" + WHITE_SPACE + "'Arg 1'") .thenReturn("not a CLI command;"); // When: console.nextNonCliCommand(); // Then: verify(cliCommand).execute(eq(ImmutableList.of("Arg0", "Arg 1")), any()); }
// Convenience overload: delegates with a null first argument.
public Span toSpan(TraceContext context) { return toSpan(null, context); }
// An unsampled incoming context must still be decorated: the resulting span
// context is a distinct instance carrying non-empty extra state.
@Test void toSpan_decorates_unsampled() { propagationFactory = baggageFactory; TraceContext incoming = TraceContext.newBuilder().traceId(1L).spanId(2L).sampled(false).build(); TraceContext toSpan = tracer.toSpan(incoming).context(); assertThat(toSpan).isNotSameAs(incoming); assertThat(toSpan.extra()).isNotEmpty(); }
// Thin delegation to the mapper's paged query.
@Override public PageResult<ArticleDO> getArticlePage(ArticlePageReqVO pageReqVO) { return articleMapper.selectPage(pageReqVO); }
// Template test (disabled): inserts one matching row plus one clone per
// mismatching field, then pages with a request that should match only the
// first row. The null placeholders must be filled in before enabling.
@Test @Disabled // TODO 请修改 null 为需要的值,然后删除 @Disabled 注解 public void testGetArticlePage() { // mock 数据 ArticleDO dbArticle = randomPojo(ArticleDO.class, o -> { // 等会查询到 o.setCategoryId(null); o.setTitle(null); o.setAuthor(null); o.setPicUrl(null); o.setIntroduction(null); o.setBrowseCount(null); o.setSort(null); o.setStatus(null); o.setSpuId(null); o.setRecommendHot(null); o.setRecommendBanner(null); o.setContent(null); o.setCreateTime(null); }); articleMapper.insert(dbArticle); // 测试 categoryId 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setCategoryId(null))); // 测试 title 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setTitle(null))); // 测试 author 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setAuthor(null))); // 测试 picUrl 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setPicUrl(null))); // 测试 introduction 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setIntroduction(null))); // 测试 browseCount 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setBrowseCount(null))); // 测试 sort 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setSort(null))); // 测试 status 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setStatus(null))); // 测试 spuId 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setSpuId(null))); // 测试 recommendHot 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setRecommendHot(null))); // 测试 recommendBanner 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setRecommendBanner(null))); // 测试 content 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setContent(null))); // 测试 createTime 不匹配 articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setCreateTime(null))); // 准备参数 ArticlePageReqVO reqVO = new ArticlePageReqVO(); reqVO.setCategoryId(null); reqVO.setTitle(null); reqVO.setAuthor(null); reqVO.setStatus(null); reqVO.setSpuId(null); reqVO.setRecommendHot(null); reqVO.setRecommendBanner(null); reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 
28)); // 调用 PageResult<ArticleDO> pageResult = articleService.getArticlePage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbArticle, pageResult.getList().get(0)); }
// Serializes any object to JSON via the shared MAPPER; a checked
// JsonProcessingException is rethrown unchecked by @SneakyThrows.
@SneakyThrows(JsonProcessingException.class) public static String toJsonString(final Object obj) { return MAPPER.writeValueAsString(obj); }
// A null argument serializes to the JSON literal "null".
@Test void assertToJsonStringOnNull() { assertThat(JsonUtils.toJsonString(null), is("null")); }
// Wraps the supplier so that each get() call runs inside
// observation.observe(...).
static <T> Supplier<T> decorateSupplier(Observation observation, Supplier<T> supplier) { return () -> observation.observe(supplier); }
// A RuntimeException thrown by the wrapped supplier must propagate to the
// caller and be recorded (started/stopped/error) on the single observation.
@Test public void shouldDecorateSupplierAndReturnWithException() throws Throwable { given(helloWorldService.returnHelloWorld()).willThrow(new RuntimeException("BAM!")); Supplier<String> supplier = Observations .decorateSupplier(observation, helloWorldService::returnHelloWorld); Try<String> result = Try.of(supplier::get); assertThat(result.isFailure()).isTrue(); assertThat(result.failed().get()).isInstanceOf(RuntimeException.class); assertThat(observationRegistry) .hasSingleObservationThat() .hasNameEqualTo(ObservationsTest.class.getName()) .hasBeenStarted() .hasBeenStopped() .assertThatError() .isInstanceOf(RuntimeException.class); then(helloWorldService).should(times(1)).returnHelloWorld(); }
// Broadcasts issue-change events to branch listeners; no-ops when there are
// no listeners, issues or events.
// NOTE(review): catching Error (but not Exception) looks inverted — a
// RuntimeException from a listener will propagate while a JVM Error is only
// logged at warn. Confirm this is intentional.
@Override public void broadcastOnIssueChange(List<DefaultIssue> issues, Collection<QGChangeEvent> changeEvents, boolean fromAlm) { if (listeners.isEmpty() || issues.isEmpty() || changeEvents.isEmpty()) { return; } try { broadcastChangeEventsToBranches(issues, changeEvents, fromAlm); } catch (Error e) { LOG.warn(format("Broadcasting to listeners failed for %s events", changeEvents.size()), e); } }
// With an empty change-event collection, no listener may be touched.
@Test public void broadcastOnIssueChange_has_no_effect_when_no_changeEvent() { underTest.broadcastOnIssueChange(oneIssueOnComponent1, emptySet(), false); verifyNoInteractions(listener1, listener2, listener3); }
// Computes the container entrypoint: an explicit user entrypoint wins (the
// single value "INHERIT" yields null, i.e. inherit from the base image); WAR
// projects get a Jetty launcher unless a custom base image is set; otherwise
// a classpath is assembled per containerizing mode (with duplicate jar names
// disambiguated by file size, see jib#3331), the main class is resolved, a
// JVM-args-file layer is added, and a "java -cp <cp> <main>" list is built.
@Nullable @VisibleForTesting static List<String> computeEntrypoint( RawConfiguration rawConfiguration, ProjectProperties projectProperties, JibContainerBuilder jibContainerBuilder) throws MainClassInferenceException, InvalidAppRootException, IOException, InvalidContainerizingModeException { Optional<List<String>> rawEntrypoint = rawConfiguration.getEntrypoint(); List<String> rawExtraClasspath = rawConfiguration.getExtraClasspath(); boolean entrypointDefined = rawEntrypoint.isPresent() && !rawEntrypoint.get().isEmpty(); if (entrypointDefined && (rawConfiguration.getMainClass().isPresent() || !rawConfiguration.getJvmFlags().isEmpty() || !rawExtraClasspath.isEmpty() || rawConfiguration.getExpandClasspathDependencies())) { projectProperties.log( LogEvent.info( "mainClass, extraClasspath, jvmFlags, and expandClasspathDependencies are ignored " + "when entrypoint is specified")); } if (projectProperties.isWarProject()) { if (entrypointDefined) { return rawEntrypoint.get().size() == 1 && "INHERIT".equals(rawEntrypoint.get().get(0)) ? null : rawEntrypoint.get(); } if (rawConfiguration.getMainClass().isPresent() || !rawConfiguration.getJvmFlags().isEmpty() || !rawExtraClasspath.isEmpty() || rawConfiguration.getExpandClasspathDependencies()) { projectProperties.log( LogEvent.warn( "mainClass, extraClasspath, jvmFlags, and expandClasspathDependencies are ignored " + "for WAR projects")); } return rawConfiguration.getFromImage().isPresent() ? null // Inherit if a custom base image. 
: Arrays.asList("java", "-jar", "/usr/local/jetty/start.jar", "--module=ee10-deploy"); } List<String> classpath = new ArrayList<>(rawExtraClasspath); AbsoluteUnixPath appRoot = getAppRootChecked(rawConfiguration, projectProperties); ContainerizingMode mode = getContainerizingModeChecked(rawConfiguration, projectProperties); switch (mode) { case EXPLODED: classpath.add(appRoot.resolve("resources").toString()); classpath.add(appRoot.resolve("classes").toString()); break; case PACKAGED: classpath.add(appRoot.resolve("classpath/*").toString()); break; default: throw new IllegalStateException("unknown containerizing mode: " + mode); } if (projectProperties.getMajorJavaVersion() >= 9 || rawConfiguration.getExpandClasspathDependencies()) { List<Path> jars = projectProperties.getDependencies(); Map<String, Long> occurrences = jars.stream() .map(path -> path.getFileName().toString()) .collect(Collectors.groupingBy(filename -> filename, Collectors.counting())); List<String> duplicates = occurrences.entrySet().stream() .filter(entry -> entry.getValue() > 1) .map(Map.Entry::getKey) .collect(Collectors.toList()); for (Path jar : jars) { // Handle duplicates by appending filesize to the end of the file. This renaming logic // must be in sync with the code that does the same in the other place. See // https://github.com/GoogleContainerTools/jib/issues/3331 String jarName = jar.getFileName().toString(); if (duplicates.contains(jarName)) { jarName = jarName.replaceFirst("\\.jar$", "-" + Files.size(jar)) + ".jar"; } classpath.add(appRoot.resolve("libs").resolve(jarName).toString()); } } else { classpath.add(appRoot.resolve("libs/*").toString()); } String classpathString = String.join(":", classpath); String mainClass; try { mainClass = MainClassResolver.resolveMainClass( rawConfiguration.getMainClass().orElse(null), projectProperties); } catch (MainClassInferenceException ex) { if (entrypointDefined) { // We will use the user-given entrypoint, so don't fail. 
mainClass = "could-not-infer-a-main-class"; } else { throw ex; } } addJvmArgFilesLayer( rawConfiguration, projectProperties, jibContainerBuilder, classpathString, mainClass); if (projectProperties.getMajorJavaVersion() >= 9) { classpathString = "@" + appRoot.resolve(JIB_CLASSPATH_FILE); } if (entrypointDefined) { return rawEntrypoint.get().size() == 1 && "INHERIT".equals(rawEntrypoint.get().get(0)) ? null : rawEntrypoint.get(); } List<String> entrypoint = new ArrayList<>(4 + rawConfiguration.getJvmFlags().size()); entrypoint.add("java"); entrypoint.addAll(rawConfiguration.getJvmFlags()); entrypoint.add("-cp"); entrypoint.add(classpathString); entrypoint.add(mainClass); return entrypoint; }
// With expandClasspathDependencies, each dependency jar appears individually
// in the -cp argument of the computed entrypoint, in order.
@Test public void testComputeEntrypoint_expandClasspathDependencies() throws MainClassInferenceException, InvalidAppRootException, IOException, InvalidContainerizingModeException { when(rawConfiguration.getExpandClasspathDependencies()).thenReturn(true); assertThat( PluginConfigurationProcessor.computeEntrypoint( rawConfiguration, projectProperties, jibContainerBuilder)) .containsExactly( "java", "-cp", "/app/resources:/app/classes:/app/libs/foo-1.jar:/app/libs/bar-2.jar", "java.lang.Object") .inOrder(); }
// Turns a non-expected HTTP status into a thrown exception: tries to read the
// JSON error payload and reconstruct the server-advertised exception class
// reflectively (String-message constructor); any failure along the way falls
// back to an IOException carrying status/message/URL. The error stream is
// always closed; close failures are deliberately ignored.
@SuppressWarnings("unchecked") public static void validateResponse(HttpURLConnection conn, int expectedStatus) throws IOException { if (conn.getResponseCode() != expectedStatus) { Exception toThrow; InputStream es = null; try { es = conn.getErrorStream(); Map json = JsonSerialization.mapReader().readValue(es); json = (Map) json.get(ERROR_JSON); String exClass = (String) json.get(ERROR_CLASSNAME_JSON); String exMsg = (String) json.get(ERROR_MESSAGE_JSON); if (exClass != null) { try { ClassLoader cl = HttpExceptionUtils.class.getClassLoader(); Class klass = cl.loadClass(exClass); Preconditions.checkState(Exception.class.isAssignableFrom(klass), "Class [%s] is not a subclass of Exception", klass); MethodHandle methodHandle = PUBLIC_LOOKUP.findConstructor( klass, EXCEPTION_CONSTRUCTOR_TYPE); toThrow = (Exception) methodHandle.invoke(exMsg); } catch (Throwable t) { toThrow = new IOException(String.format( "HTTP status [%d], exception [%s], message [%s], URL [%s]", conn.getResponseCode(), exClass, exMsg, conn.getURL())); } } else { String msg = (exMsg != null) ? exMsg : conn.getResponseMessage(); toThrow = new IOException(String.format( "HTTP status [%d], message [%s], URL [%s]", conn.getResponseCode(), msg, conn.getURL())); } } catch (Exception ex) { toThrow = new IOException(String.format( "HTTP status [%d], message [%s], URL [%s], exception [%s]", conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(), ex.toString()), ex); } finally { if (es != null) { try { es.close(); } catch (IOException ex) { //ignore } } } throwEx(toThrow); } }
// A failing response with no error payload must still surface as IOException.
@Test public void testValidateResponseFailNoErrorMessage() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST); LambdaTestUtils.intercept(IOException.class, () -> HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED)); }
// Keeps a reference to the supplied options.
public ExitStatus(Options options) { this.options = options; }
// In WIP mode, an UNDEFINED test-case status must yield exit code 0.
@Test void wip_with_undefined_scenarios() { createWipRuntime(); bus.send(testCaseFinishedWithStatus(Status.UNDEFINED)); assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x0))); }
/**
 * Renders the path elements joined by the configured delimiter; an empty
 * element list renders as the empty string.
 */
public String getRelative() {
    final StringBuilder joined = new StringBuilder();
    for (int i = 0; i < elements.size(); i++) {
        if (i > 0) {
            joined.append(delimiter);
        }
        joined.append(elements.get(i));
    }
    return joined.toString();
}
// Every construction variant must render the same delimiter-joined relative
// path; a single element renders without any delimiter.
@Test public void testGetPath() { assertEquals("foo/bar/baz", getAbsolutePath().getRelative()); assertEquals("foo/bar/baz", getRelativePath().getRelative()); assertEquals("foo/bar/baz", getWithSlashes().getRelative()); assertEquals("foo/bar/baz", getAppended().getRelative()); assertEquals("foo", getOne().getRelative()); }
/**
 * Applies a partial update to the product. Validation failures are rethrown
 * as BindException so the framework can render the error response; on
 * success the service is invoked and 204 No Content is returned.
 */
@PatchMapping
public ResponseEntity<?> updateProduct(@PathVariable("productId") int productId,
                                       @Valid @RequestBody UpdateProductPayload payload,
                                       BindingResult bindingResult) throws BindException {
    if (!bindingResult.hasErrors()) {
        this.productService.updateProduct(productId, payload.title(), payload.details());
        return ResponseEntity.noContent().build();
    }
    // Rethrow the original BindException unchanged when possible so the
    // caller sees the exact errors that were collected.
    if (bindingResult instanceof BindException exception) {
        throw exception;
    }
    throw new BindException(bindingResult);
}
// When the BindingResult already is a BindException, the controller rethrows
// that same exception (same errors) and never touches the service.
@Test void updateProduct_RequestIsInvalidAndBindResultIsBindException_ReturnsBadRequest() { // given var payload = new UpdateProductPayload(" ", null); var bindingResult = new BindException(new MapBindingResult(Map.of(), "payload")); bindingResult.addError(new FieldError("payload", "title", "error")); // when var exception = assertThrows(BindException.class, () -> this.controller.updateProduct(1, payload, bindingResult)); // then assertEquals(List.of(new FieldError("payload", "title", "error")), exception.getAllErrors()); verifyNoInteractions(this.productService); }
/**
 * Returns a snapshot of the job diagnostics taken under the read lock.
 * Previously the internal list itself escaped, so callers could observe (or
 * mutate) it after the lock was released; copying inside the lock makes the
 * returned value safe to use without synchronization.
 */
@Override
public List<String> getDiagnostics() {
    readLock.lock();
    try {
        return new java.util.ArrayList<>(diagnostics);
    } finally {
        readLock.unlock();
    }
}
// Forces createSplits to throw a YarnRuntimeException and verifies the
// InitTransition reports NEW state and records the exception message in the
// job diagnostics.
@Test public void testMetaInfoSizeOverMax() throws Exception { Configuration conf = new Configuration(); JobID jobID = JobID.forName("job_1234567890000_0001"); JobId jobId = TypeConverter.toYarn(jobID); MRAppMetrics mrAppMetrics = MRAppMetrics.create(); JobImpl job = new JobImpl(jobId, ApplicationAttemptId.newInstance( ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class), null, new JobTokenSecretManager(), new Credentials(), null, null, mrAppMetrics, null, true, null, 0, null, null, null, null); InitTransition initTransition = new InitTransition() { @Override protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) { throw new YarnRuntimeException(EXCEPTIONMSG); } }; JobEvent mockJobEvent = mock(JobEvent.class); JobStateInternal jobSI = initTransition.transition(job, mockJobEvent); Assert.assertTrue("When init fails, return value from InitTransition.transition should equal NEW.", jobSI.equals(JobStateInternal.NEW)); Assert.assertTrue("Job diagnostics should contain YarnRuntimeException", job.getDiagnostics().toString().contains("YarnRuntimeException")); Assert.assertTrue("Job diagnostics should contain " + EXCEPTIONMSG, job.getDiagnostics().toString().contains(EXCEPTIONMSG)); }
// Returns the static help text constant.
@Override public String getHelpMessage() { return HELP; }
// Help text must mention both viewing and updating the setting.
@Test public void shouldGetHelp() { assertThat(requestPipeliningCommand.getHelpMessage(), containsString("View the current setting")); assertThat(requestPipeliningCommand.getHelpMessage(), containsString("Update the setting as specified.")); }
// Computes table statistics from a bounded sample of partitions. Statistics
// can be disabled per session; no partitions yields zero statistics.
// Corrupted column statistics abort with the original PrestoException unless
// the session opts into ignoring them, in which case empty statistics are
// returned (after logging).
@Override public TableStatistics getTableStatistics( ConnectorSession session, SchemaTableName table, Map<String, ColumnHandle> columns, Map<String, Type> columnTypes, List<HivePartition> partitions) { if (!isStatisticsEnabled(session)) { return TableStatistics.empty(); } if (partitions.isEmpty()) { return createZeroStatistics(columns, columnTypes); } int sampleSize = getPartitionStatisticsSampleSize(session); List<HivePartition> partitionsSample = getPartitionsSample(partitions, sampleSize); try { Map<String, PartitionStatistics> statisticsSample = statisticsProvider.getPartitionsStatistics(session, table, partitionsSample); validatePartitionStatistics(table, statisticsSample); return getTableStatistics(columns, columnTypes, partitions, statisticsSample); } catch (PrestoException e) { if (e.getErrorCode().equals(HIVE_CORRUPTED_COLUMN_STATISTICS.toErrorCode()) && isIgnoreCorruptedStatistics(session)) { log.error(e); return TableStatistics.empty(); } throw e; } }
// Corrupted basic statistics must raise HIVE_CORRUPTED_COLUMN_STATISTICS by
// default, and must degrade to TableStatistics.empty() when the session sets
// ignore-corrupted-statistics.
@Test public void testGetTableStatisticsValidationFailure() { PartitionStatistics corruptedStatistics = PartitionStatistics.builder() .setBasicStatistics(new HiveBasicStatistics(-1, 0, 0, 0)) .build(); String partitionName = "p1=string1/p2=1234"; MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> ImmutableMap.of(partitionName, corruptedStatistics), quickStatsProvider); TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties( new HiveClientConfig().setIgnoreCorruptedStatistics(false), new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()) .getSessionProperties()); assertThatThrownBy(() -> statisticsProvider.getTableStatistics( session, TABLE, ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(partition(partitionName)))) .isInstanceOf(PrestoException.class) .hasFieldOrPropertyWithValue("errorCode", HIVE_CORRUPTED_COLUMN_STATISTICS.toErrorCode()); TestingConnectorSession ignoreSession = new TestingConnectorSession(new HiveSessionProperties( new HiveClientConfig().setIgnoreCorruptedStatistics(true), new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()) .getSessionProperties()); assertEquals( statisticsProvider.getTableStatistics( ignoreSession, TABLE, ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(partition(partitionName))), TableStatistics.empty()); }
/**
 * Serializes the wrapped value together with its timestamp; a null payload
 * serializes to null.
 */
@Override
public byte[] serialize(final String topic, final ValueAndTimestamp<V> data) {
    return data == null ? null : serialize(topic, data.value(), data.timestamp());
}
// Serializing a null-valued payload yields null bytes.
@Test public void shouldSerializeNullDataAsNull() { final byte[] serialized = STRING_SERDE.serializer().serialize(TOPIC, ValueAndTimestamp.make(null, TIMESTAMP)); assertThat(serialized, is(nullValue())); }
/**
 * Parses an rpc-reply document from the given XML text.
 * Returns null (after logging) if the stream reader cannot be created.
 */
public static NetconfRpcReply parseRpcReply(CharSequence xml) {
    XMLInputFactory xif = XMLInputFactory.newFactory();
    // Harden against XXE: the default factory may resolve DTDs and external
    // entities, which device-supplied XML should never need.
    xif.setProperty(XMLInputFactory.SUPPORT_DTD, false);
    xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
    try {
        XMLStreamReader xsr = xif.createXMLStreamReader(CharSource.wrap(xml).openStream());
        return parseRpcReply(xsr);
    } catch (XMLStreamException | IOException e) {
        log.error("Exception thrown creating XMLStreamReader", e);
        return null;
    }
}
// Parses a canned get-response: not an <ok/> reply, message-id "5", and a
// single <data>...</data> response body.
@Test public void testGetResponseParse() { NetconfRpcReply rep = NetconfRpcParserUtil.parseRpcReply(RESPONSE_DATA1); assertThat(rep.isOk(), is(false)); assertThat(rep.messageId(), is("5")); assertThat(rep.responses(), hasSize(1)); assertThat(rep.responses().get(0), is(both(startsWith("<data")).and(endsWith("</data>")))); // Can't do below: response contains xmlns attribute for netconf base //assertThat(rep.responses().get(0), is(RESPONSE_BODY1)); }
/**
 * Returns a new list containing every element of the input that is not
 * (null-safely) equal to the victim. A null input propagates as null.
 */
@Udf
public <T> List<T> remove(
    @UdfParameter(description = "Array of values") final List<T> array,
    @UdfParameter(description = "Value to remove") final T victim) {
  if (array == null) {
    return null;
  }
  final List<T> survivors = new java.util.ArrayList<>(array.size());
  for (final T element : array) {
    if (!Objects.equals(element, victim)) {
      survivors.add(element);
    }
  }
  return survivors;
}
// remove() must drop every map equal to the victim (map1 and map3) and keep
// only the non-matching map2.
@SuppressWarnings("unchecked") @Test public void shouldRemoveMap() { final Map<String, Integer> map1 = ImmutableMap.of("foo", 1, "bar", 2, "baz", 3); final Map<String, Integer> map2 = ImmutableMap.of("foo", 10, "baz", 3); final Map<String, Integer> map3 = ImmutableMap.of("foo", 1, "bar", 2, "baz", 3); final List<Map<String, Integer>> input1 = Arrays.asList(map1, map2, map3); final Map<String, Integer> input2 = map3; final List<Map<String, Integer>> result = udf.remove(input1, input2); assertThat(result, contains(map2)); }
// Validates the tenant/namespace pair, then delegates to the combined
// "tenant/namespace" string factory.
public static NamespaceName get(String tenant, String namespace) { validateNamespaceName(tenant, namespace); return get(tenant + '/' + namespace); }
// A single dotted token is not a valid namespace name.
@Test(expectedExceptions = IllegalArgumentException.class) public void namespace_propertyNamespace() { NamespaceName.get("property.namespace"); }
// Builds the page source for a Hive split. Order of attempts: (1) if all
// selected columns are AGGREGATED, use an aggregated page source (rejecting
// unreliable footer stats); (2) with filter pushdown enabled, try a selective
// page source; (3) skip the split entirely when its bucket or partition is
// pruned; (4) otherwise fall through to the generic createHivePageSource.
// Throws IllegalStateException when no reader can handle the split.
@Override public ConnectorPageSource createPageSource( ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableLayoutHandle layout, List<ColumnHandle> columns, SplitContext splitContext, RuntimeStats runtimeStats) { HiveTableLayoutHandle hiveLayout = (HiveTableLayoutHandle) layout; List<HiveColumnHandle> selectedColumns = columns.stream() .map(HiveColumnHandle.class::cast) .collect(toList()); HiveSplit hiveSplit = (HiveSplit) split; Path path = new Path(hiveSplit.getFileSplit().getPath()); Configuration configuration = hdfsEnvironment.getConfiguration( new HdfsContext( session, hiveSplit.getDatabase(), hiveSplit.getTable(), hiveLayout.getTablePath(), false), path); Optional<EncryptionInformation> encryptionInformation = hiveSplit.getEncryptionInformation(); CacheQuota cacheQuota = generateCacheQuota(hiveSplit); HiveFileContext fileContext = new HiveFileContext( splitContext.isCacheable(), cacheQuota, hiveSplit.getFileSplit().getExtraFileInfo().map(BinaryExtraHiveFileInfo::new), OptionalLong.of(hiveSplit.getFileSplit().getFileSize()), OptionalLong.of(hiveSplit.getFileSplit().getStart()), OptionalLong.of(hiveSplit.getFileSplit().getLength()), hiveSplit.getFileSplit().getFileModifiedTime(), HiveSessionProperties.isVerboseRuntimeStatsEnabled(session), runtimeStats); if (columns.stream().anyMatch(columnHandle -> ((HiveColumnHandle) columnHandle).getColumnType().equals(AGGREGATED))) { checkArgument(columns.stream().allMatch(columnHandle -> ((HiveColumnHandle) columnHandle).getColumnType().equals(AGGREGATED)), "Not all columns are of 'AGGREGATED' type"); if (hiveLayout.isFooterStatsUnreliable()) { throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, format("Partial aggregation pushdown is not supported when footer stats are unreliable. " + "Table %s has file %s with unreliable footer stats. 
" + "Set session property [catalog-name].pushdown_partial_aggregations_into_scan=false and execute query again.", hiveLayout.getSchemaTableName(), hiveSplit.getFileSplit().getPath())); } return createAggregatedPageSource(aggregatedPageSourceFactories, configuration, session, hiveSplit, hiveLayout, selectedColumns, fileContext, encryptionInformation); } if (hiveLayout.isPushdownFilterEnabled()) { Optional<ConnectorPageSource> selectivePageSource = createSelectivePageSource( selectivePageSourceFactories, configuration, session, hiveSplit, hiveLayout, selectedColumns, hiveStorageTimeZone, typeManager, optimizedRowExpressionCache, splitContext, fileContext, encryptionInformation); if (selectivePageSource.isPresent()) { return selectivePageSource.get(); } } TupleDomain<HiveColumnHandle> effectivePredicate = hiveLayout.getDomainPredicate() .transform(Subfield::getRootName) .transform(hiveLayout.getPredicateColumns()::get); if (shouldSkipBucket(hiveLayout, hiveSplit, splitContext, isLegacyTimestampBucketing(session))) { return new HiveEmptySplitPageSource(); } if (shouldSkipPartition(typeManager, hiveLayout, hiveStorageTimeZone, hiveSplit, splitContext)) { return new HiveEmptySplitPageSource(); } Optional<ConnectorPageSource> pageSource = createHivePageSource( cursorProviders, pageSourceFactories, configuration, session, hiveSplit.getFileSplit(), hiveSplit.getTableBucketNumber(), hiveSplit.getStorage(), splitContext.getDynamicFilterPredicate().map(filter -> filter.transform(handle -> (HiveColumnHandle) handle).intersect(effectivePredicate)).orElse(effectivePredicate), selectedColumns, hiveLayout.getPredicateColumns(), hiveSplit.getPartitionKeys(), hiveStorageTimeZone, typeManager, hiveLayout.getSchemaTableName(), hiveLayout.getPartitionColumns().stream().map(HiveColumnHandle.class::cast).collect(toList()), hiveLayout.getDataColumns(), hiveLayout.getTableParameters(), hiveSplit.getPartitionDataColumnCount(), hiveSplit.getTableToPartitionMapping(), 
hiveSplit.getBucketConversion(), hiveSplit.isS3SelectPushdownEnabled(), fileContext, hiveLayout.getRemainingPredicate(), hiveLayout.isPushdownFilterEnabled(), rowExpressionService, encryptionInformation, hiveSplit.getRowIdPartitionComponent()); if (pageSource.isPresent()) { return pageSource.get(); } throw new IllegalStateException("Could not find a file reader for split " + hiveSplit); }
// With pushdown enabled but no selective page source available, the provider
// must fall back to wrapping the reader in a FilteringPageSource.
@Test public void testWrapsInFilteringPageSourceWhenNoSelectivePageSource() { HivePageSourceProvider pageSourceProvider = createPageSourceProvider(); ConnectorPageSource pageSource = pageSourceProvider.createPageSource( new HiveTransactionHandle(), SESSION, getHiveSplit(RCBINARY), getHiveTableLayout(true, false, false), ImmutableList.of(), new SplitContext(false), new RuntimeStats()); assertTrue(pageSource instanceof FilteringPageSource, format("pageSource was %s", pageSource.getClass().getSimpleName())); }
// Convenience overload that compiles the regex before delegating.
// NOTE(review): compiles the pattern on every call — hot-path callers should
// prefer the Pattern-accepting overload with a cached pattern.
public static FindKV findKV(String regex, int keyGroup, int valueGroup) { return findKV(Pattern.compile(regex), keyGroup, valueGroup); }
// A regex with no match in the input must produce an empty output collection.
@Test @Category(NeedsRunner.class) public void testKVFindNone() { PCollection<KV<String, String>> output = p.apply(Create.of("x y z")).apply(Regex.findKV("a (b) (c)", 1, 2)); PAssert.that(output).empty(); p.run(); }
// Background step: materializes the schema and content lists from the two
// cursors captured at construction time.
// NOTE(review): the doInBackground(Void...) signature suggests this is an
// android.os.AsyncTask, which is deprecated — TODO confirm migration plans.
@Override protected Void doInBackground(Void... params) { schemaList = getDbTableSchema(schemaCursor); contentList = getDbTableDetails(contentCursor); return null; }
// Runs the task against an on-disk SQLite fixture and checks the extracted
// schema/content sizes plus the "records loaded" label on the loading text.
@Test public void testExecute() { SQLiteDatabase sqLiteDatabase = SQLiteDatabase.openDatabase( "src/test/resources/test.db", null, SQLiteDatabase.OPEN_READONLY); assertNotNull(sqLiteDatabase); DbViewerFragment mock = mock(DbViewerFragment.class); AppCompatTextView loadingText = new AppCompatTextView(ApplicationProvider.getApplicationContext()); mock.loadingText = loadingText; Cursor schemaCursor = sqLiteDatabase.rawQuery("PRAGMA table_info('users');", null); Cursor contentCursor = sqLiteDatabase.rawQuery("SELECT * FROM users", null); DbViewerTask task = new DbViewerTask(schemaCursor, contentCursor, webView, mock); task.doInBackground(); shadowOf(getMainLooper()).idle(); assertNotNull(task.schemaList); assertNotNull(task.contentList); // 3 columns assertEquals(3, task.schemaList.size()); // 4 records assertEquals(4, task.contentList.size()); assertEquals("4 records loaded", loadingText.getText().toString()); sqLiteDatabase.close(); }
// Delegates to the overload with a no-result statement executor.
@Override public QueryActionStats execute(Statement statement, QueryStage queryStage) { return execute(statement, queryStage, new NoResultStatementExecutor<>()); }
// A syntax error must surface as a non-retryable PrestoQueryException with
// FAILED query stats and a single recorded QueryFailure for the CONTROL
// cluster carrying the Presto query id.
@Test public void testQueryFailed() { try { prestoAction.execute( sqlParser.createStatement("SELECT * FROM test_table", PARSING_OPTIONS), QUERY_STAGE); fail("Expect QueryException"); } catch (PrestoQueryException e) { assertFalse(e.isRetryable()); assertEquals(e.getErrorCodeName(), "PRESTO(SYNTAX_ERROR)"); assertTrue(e.getQueryActionStats().getQueryStats().isPresent()); assertEquals(e.getQueryActionStats().getQueryStats().map(QueryStats::getState).orElse(null), FAILED.name()); QueryFailure queryFailure = getOnlyElement(verificationContext.getQueryFailures()); assertEquals(queryFailure.getClusterType(), CONTROL.name()); assertEquals(queryFailure.getQueryStage(), QUERY_STAGE.name()); assertEquals(queryFailure.getErrorCode(), "PRESTO(SYNTAX_ERROR)"); assertFalse(queryFailure.isRetryable()); assertNotNull(queryFailure.getPrestoQueryId()); } }
// Delegates with the current wall-clock time and this member's node id.
IdBatchAndWaitTime newIdBaseLocal(int batchSize) { return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize); }
// A small id batch must not require any wait time.
@Test public void when_fewIds_then_noWaitTime() { assertEquals(0, gen.newIdBaseLocal(1516028439000L, 1234, 100).waitTimeMillis); }
// Visits the node's graph with a PipelinedPathChecker for the given goal set
// and returns the checker's result flag.
@VisibleForTesting static boolean checkPipelinedPath(ExecNode<?> node, Set<ExecNode<?>> goals) { PipelinedPathChecker checker = new PipelinedPathChecker(goals); node.accept(checker); return checker.res; }
// Builds the 7-node graph sketched in the ASCII diagram and checks pipelined
// reachability from node 4 against two different goal sets.
@Test void testCheckPipelinedPath() { // P = InputProperty.DamBehavior.PIPELINED, E = InputProperty.DamBehavior.END_INPUT B = // InputProperty.DamBehavior.BLOCKING // // 0 -P-> 1 ----E----\ // \-P-\ \ // 2 ----E----> 3 -P-> 4 // 5 -P-> 6 -B-/ TestingBatchExecNode[] nodes = new TestingBatchExecNode[7]; for (int i = 0; i < nodes.length; i++) { nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i); } nodes[1].addInput(nodes[0]); nodes[3].addInput(nodes[1]); nodes[3].addInput( nodes[2], InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build()); nodes[3].addInput( nodes[6], InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build()); nodes[4].addInput( nodes[1], InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build()); nodes[4].addInput(nodes[3]); nodes[6].addInput(nodes[5]); assertThat( InputOrderCalculator.checkPipelinedPath( nodes[4], new HashSet<>(Arrays.asList(nodes[2], nodes[5], nodes[6])))) .isFalse(); assertThat( InputOrderCalculator.checkPipelinedPath( nodes[4], new HashSet<>(Arrays.asList(nodes[0], nodes[2])))) .isTrue(); }
// Delegates using the registry's default config and an empty tag map.
@Override public TimeLimiter timeLimiter(final String name) { return timeLimiter(name, getDefaultConfig(), emptyMap()); }
// The config supplier is invoked once per distinct name; a same-name lookup
// returns the cached instance without re-invoking the supplier.
@Test @SuppressWarnings("unchecked") public void timeLimiterPositiveWithSupplier() { TimeLimiterRegistry registry = new InMemoryTimeLimiterRegistry(config); Supplier<TimeLimiterConfig> timeLimiterConfigSupplier = mock(Supplier.class); given(timeLimiterConfigSupplier.get()).willReturn(config); TimeLimiter firstTimeLimiter = registry.timeLimiter("test", timeLimiterConfigSupplier); verify(timeLimiterConfigSupplier, times(1)).get(); TimeLimiter sameAsFirst = registry.timeLimiter("test", timeLimiterConfigSupplier); verify(timeLimiterConfigSupplier, times(1)).get(); TimeLimiter anotherLimit = registry.timeLimiter("test1", timeLimiterConfigSupplier); verify(timeLimiterConfigSupplier, times(2)).get(); then(firstTimeLimiter).isEqualTo(sameAsFirst); then(firstTimeLimiter).isNotEqualTo(anotherLimit); }
// Fans numTasks out to the executor, collects each result via resCollector
// while the wall-clock deadline (deadlineInMs, absolute epoch millis) holds,
// funnels the first failure/timeout to errHandler, then cancels unfinished
// futures and uses the Phaser to wait for every registered worker to finish
// before returning. Workers that register after the main thread has
// deregistered (register() < 0) bail out with null.
public static <T> void runTasksWithDeadline(int numTasks, Function<Integer, T> taskFunc, Consumer<T> resCollector, Consumer<Exception> errHandler, ExecutorService executorService, long deadlineInMs) { Phaser phaser = new Phaser(1); List<Future<T>> futures = new ArrayList<>(numTasks); for (int i = 0; i < numTasks; i++) { int index = i; futures.add(executorService.submit(new TraceCallable<T>() { @Override public T callJob() { try { // Register the thread to the phaser for main thread to wait for it to complete. if (phaser.register() < 0) { return null; } return taskFunc.apply(index); } finally { phaser.arriveAndDeregister(); } } })); } try { // Check deadline while waiting for the task results. for (Future<T> future : futures) { T taskResult = future.get(deadlineInMs - System.currentTimeMillis(), TimeUnit.MILLISECONDS); resCollector.accept(taskResult); } } catch (Exception e) { errHandler.accept(e); } finally { // Cancel all ongoing jobs for (Future<T> future : futures) { if (!future.isDone()) { future.cancel(true); } } // Deregister the main thread and wait for all threads done phaser.awaitAdvance(phaser.arriveAndDeregister()); } }
// Covers the three outcomes of runTasksWithDeadline: all tasks succeed (results
// are summed), a task throws (error handler receives the exception), and tasks
// exceed the deadline (error handler receives a TimeoutException).
@Test
public void testRunTasksWithDeadline() {
    ExecutorService exec = Executors.newCachedThreadPool();
    AtomicInteger sum = new AtomicInteger(0);
    QueryMultiThreadingUtils.runTasksWithDeadline(5, index -> index, sum::addAndGet, e -> {
    }, exec, System.currentTimeMillis() + 500);
    // sum of 0, 1, .., 4 indices.
    Assert.assertEquals(sum.get(), 10);
    // Task throws exception before timeout.
    Exception[] err = new Exception[1];
    QueryMultiThreadingUtils.runTasksWithDeadline(5, index -> {
        throw new RuntimeException("oops: " + index);
    }, sum::addAndGet, e -> err[0] = e, exec, System.currentTimeMillis() + 500);
    Assert.assertTrue(err[0].getMessage().contains("oops"));
    // Task timed out.
    QueryMultiThreadingUtils.runTasksWithDeadline(5, index -> {
        try {
            Thread.sleep(10_000);
            return index;
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }, sum::addAndGet, e -> err[0] = e, exec, System.currentTimeMillis() + 500);
    Assert.assertTrue(err[0] instanceof TimeoutException);
}
/**
 * Escapes characters that would break a parenthesised expression: each
 * backslash, '(' and ')' in {@code input} is prefixed with a backslash.
 */
static String escape(String input) {
    StringBuilder escaped = new StringBuilder(input.length());
    for (int i = 0; i < input.length(); i++) {
        char c = input.charAt(i);
        // Prefix the escape character before any character that needs quoting.
        if (c == '\\' || c == '(' || c == ')') {
            escaped.append('\\');
        }
        escaped.append(c);
    }
    return escaped.toString();
}
@Test
public void testEscapeEmptyString() {
    // An empty input contains nothing to escape and must round-trip unchanged.
    String escaped = ClientQuotasImageNode.escape("");
    assertEquals("", escaped);
}
/**
 * Creates a {@code Sessions} windowing strategy whose sessions are separated
 * by gaps of at least {@code gapDuration}.
 */
public static Sessions withGapDuration(Duration gapDuration) {
    return new Sessions(gapDuration);
}
// With a 10ms gap, timestamps 0 and 10 fall into adjacent-but-separate sessions
// (each session spans [t, t+gap)), and 101 starts a third session.
@Test
public void testSimple() throws Exception {
    Map<IntervalWindow, Set<String>> expected = new HashMap<>();
    expected.put(new IntervalWindow(new Instant(0), new Instant(10)), set(0));
    expected.put(new IntervalWindow(new Instant(10), new Instant(20)), set(10));
    expected.put(new IntervalWindow(new Instant(101), new Instant(111)), set(101));
    assertEquals(
        expected,
        runWindowFn(Sessions.withGapDuration(Duration.millis(10)), Arrays.asList(0L, 10L, 101L)));
}
/**
 * Serializes the given span into this encoder's byte representation.
 *
 * @param input the span to serialize
 * @return the encoded bytes
 */
public abstract byte[] encode(MutableSpan input);
// Marking the root server span as shared must surface as "shared":true in the
// encoded JSON; the rest of the serialized form is unchanged.
@Test
void rootServerSpan_JSON_V2_shared() {
    rootServerSpan.setShared();
    assertThat(new String(encoder.encode(rootServerSpan), UTF_8))
        .isEqualTo(
            "{\"traceId\":\"dc955a1d4768875d\",\"id\":\"dc955a1d4768875d\",\"kind\":\"SERVER\",\"name\":\"get\",\"timestamp\":1510256710021866,\"duration\":1117,\"localEndpoint\":{\"serviceName\":\"isao01\",\"ipv4\":\"10.23.14.72\"},\"tags\":{\"http.path\":\"/rs/A\",\"location\":\"T67792\",\"other\":\"A\"},\"shared\":true}");
}
/**
 * Validates that {@code value} parses as a long within [min, max].
 * Parse failures propagate as {@link NumberFormatException}; out-of-range
 * values are reported via a {@link RuntimeException}.
 */
@Override
public void verify(String value) {
    final long parsed = Long.parseLong(value);
    final boolean inRange = parsed >= min && parsed <= max;
    if (!inRange) {
        throw new RuntimeException(format("value is not in range(%d, %d)", min, max));
    }
}
@Test
public void verify_ValidValue_NoExceptionThrown() {
    // "50" lies inside the configured range, so verify must return normally.
    longRangeAttribute.verify("50");
}
/**
 * Executes the pluggable task through the task extension and reports the
 * outcome to the build console. A null result (plugin returned null, or the
 * extension threw before a result was assigned) and an unsuccessful result are
 * both logged as errors; the console-logger context is always cleared.
 */
@Override
public void build(final DefaultGoPublisher publisher,
                  final EnvironmentVariableContext environmentVariableContext,
                  TaskExtension taskExtension,
                  ArtifactExtension artifactExtension,
                  PluginRequestProcessorRegistry pluginRequestProcessorRegistry,
                  Charset consoleLogCharset) {
    ExecutionResult executionResult = null;
    try {
        executionResult = taskExtension.execute(pluginId,
                (task, pluginDescriptor) -> executeTask(task, publisher, environmentVariableContext, consoleLogCharset));
    } catch (Exception e) {
        logException(publisher, e);
    } finally {
        // Always detach the console logger context, even when the plugin throws.
        JobConsoleLoggerInternal.unsetContext();
    }
    if (executionResult == null) {
        // Reached when the extension threw (result never assigned) or the plugin returned null.
        logError(publisher, "ExecutionResult cannot be null. Please return a success or a failure response.");
    } else if (!executionResult.isSuccessful()) {
        logError(publisher, executionResult.getMessagesForDisplay());
    }
}
// Subclasses the builder to intercept executeTask and verifies that building
// invokes the plugin's task executor exactly once.
@Test
public void shouldInvokeTheTaskExecutorOfThePlugin() {
    final int[] executeTaskCalled = new int[1];
    PluggableTaskBuilder taskBuilder = new PluggableTaskBuilder(runIfConfigs, cancelBuilder,
            pluggableTask, TEST_PLUGIN_ID, "test-directory") {
        @Override
        protected ExecutionResult executeTask(Task task, DefaultGoPublisher publisher,
                                              EnvironmentVariableContext environmentVariableContext,
                                              Charset consoleLogCharset) {
            executeTaskCalled[0]++;
            return ExecutionResult.success("Test succeeded");
        }
    };
    taskBuilder.build(goPublisher, variableContext, taskExtension, null, null, UTF_8);
    assertThat(executeTaskCalled[0]).isEqualTo(1);
}
/**
 * Maps each input record to zero or more output records using {@code mapper}.
 * Delegates to the named variant with an empty name.
 */
@Override
public <KR, VR> KStream<KR, VR> flatMap(final KeyValueMapper<? super K, ? super V, ? extends Iterable<? extends KeyValue<? extends KR, ? extends VR>>> mapper) {
    return flatMap(mapper, NamedInternal.empty());
}
@Test
public void shouldNotAllowNullNamedOnFlatMap() {
    // A null Named argument must be rejected eagerly with a descriptive NPE.
    final NullPointerException npe = assertThrows(
        NullPointerException.class,
        () -> testStream.flatMap((k, v) -> Collections.singleton(new KeyValue<>(k, v)), null));
    assertThat(npe.getMessage(), equalTo("named can't be null"));
}
/**
 * Returns the next parseable record, skipping inputs for which the superclass
 * yields null, until a record is produced or the end of input is reached.
 */
@Override
public OUT nextRecord(OUT record) throws IOException {
    OUT parsed = super.nextRecord(record);
    // The superclass returns null for lines it could not parse; keep pulling
    // until a record materialises or the input is exhausted.
    while (parsed == null && !reachedEnd()) {
        parsed = super.nextRecord(record);
    }
    return parsed;
}
// Reads two pipe-delimited CSV lines into 2-field tuples: only the first two
// columns of each line should be parsed, trailing columns are ignored, and a
// third read returns null with reachedEnd() true.
@Test
void testReadFirstN() {
    try {
        final String fileContent = "111|222|333|444|555|\n666|777|888|999|000|\n";
        final FileInputSplit split = createTempFile(fileContent);
        final TupleTypeInfo<Tuple2<Integer, Integer>> typeInfo =
                TupleTypeInfo.getBasicTupleTypeInfo(Integer.class, Integer.class);
        final CsvInputFormat<Tuple2<Integer, Integer>> format =
                new TupleCsvInputFormat<Tuple2<Integer, Integer>>(PATH, typeInfo);
        format.setFieldDelimiter("|");
        format.configure(new Configuration());
        format.open(split);
        Tuple2<Integer, Integer> result = new Tuple2<>();
        result = format.nextRecord(result);
        assertThat(result.f0).isEqualTo(Integer.valueOf(111));
        assertThat(result.f1).isEqualTo(Integer.valueOf(222));
        result = format.nextRecord(result);
        assertThat(result.f0).isEqualTo(Integer.valueOf(666));
        assertThat(result.f1).isEqualTo(Integer.valueOf(777));
        result = format.nextRecord(result);
        assertThat(result).isNull();
        assertThat(format.reachedEnd()).isTrue();
    } catch (Exception ex) {
        fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage());
    }
}
/** Returns the configured metrics name. */
public String getMetricsName() {
    return metricsName;
}
@Test
public void testGetMetricsName() {
    // The endpoint must expose the metrics name it was configured with.
    String actual = endpoint.getMetricsName();
    assertThat(actual, is(METRICS_NAME));
}
/**
 * Issues an asynchronous {@code run} RPC with the given per-call deadline
 * applied to the plugin service stub.
 *
 * @param request  the run request to send
 * @param deadline absolute deadline for this call
 * @return a future completing with the plugin's response
 */
public ListenableFuture<RunResponse> runWithDeadline(RunRequest request, Deadline deadline) {
    return pluginService.withDeadline(deadline).run(request);
}
// Registers an in-process service that answers every run with an empty
// response, and verifies a default (empty) request completes successfully
// without any detection reports attached.
@Test
public void run_invalidRequest_returnNoDetectionReports() throws Exception {
    RunRequest runRequest = RunRequest.getDefaultInstance();
    PluginServiceImplBase runImpl = new PluginServiceImplBase() {
        @Override
        public void run(RunRequest request, StreamObserver<RunResponse> responseObserver) {
            responseObserver.onNext(RunResponse.getDefaultInstance());
            responseObserver.onCompleted();
        }
    };
    serviceRegistry.addService(runImpl);
    ListenableFuture<RunResponse> run = pluginService.runWithDeadline(runRequest, DEADLINE_DEFAULT);
    RunResponse runResponse = run.get();
    assertThat(run.isDone()).isTrue();
    assertThat(runResponse.hasReports()).isFalse();
}
public static boolean isAuthorizationEnabled(Configuration conf) { if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) { return false; } // If the V2 api of authorizer in use, the session state getAuthorizer return null. // Here we disable authorization if we use V2 api or the DefaultHiveAuthorizationProvider // The additional authorization checks happening in hcatalog are designed to // work with storage based authorization (on client side). It should not try doing // additional checks if a V2 authorizer or DefaultHiveAuthorizationProvider is in use. // The recommended configuration is to use storage based authorization in metastore server. // However, if user define a custom V1 authorization, it will be honored. if (SessionState.get().getAuthorizer() == null || DefaultHiveAuthorizationProvider.class.getName().equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER))) { LOG.info("Metastore authorizer is skipped for V2 authorizer or" + " DefaultHiveAuthorizationProvider"); return false; } return true; }
// Even with authorization enabled, configuring a V2 authorizer factory must
// cause hcatalog authorization to be reported as disabled.
@Test
public void authEnabledV2Auth() throws Exception {
    HiveConf hcatConf = new HiveConfForTest(this.getClass());
    hcatConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
    hcatConf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, DummyV2AuthorizerFactory.class.getName());
    SessionState.start(hcatConf);
    assertFalse("hcat auth should be disabled", HCatAuthUtil.isAuthorizationEnabled(hcatConf));
}
/**
 * REST endpoint: looks up a single project parameter by its code within the
 * given project, delegating to the project parameter service. Service errors
 * are mapped to {@code QUERY_PROJECT_PARAMETER_ERROR} by {@code @ApiException}.
 */
@Operation(summary = "queryProjectParameterByCode", description = "QUERY_PROJECT_PARAMETER_NOTES")
@Parameters({
        @Parameter(name = "code", description = "PROJECT_PARAMETER_CODE", schema = @Schema(implementation = long.class, example = "123456"))
})
@GetMapping(value = "/{code}")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROJECT_PARAMETER_ERROR)
public Result queryProjectParameterByCode(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                          @Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                          @PathVariable("code") long code) {
    return projectParameterService.queryProjectParameterByCode(loginUser, projectCode, code);
}
@Test
public void testQueryProjectParameterByCode() {
    // Stub the service to succeed for any user/project/parameter combination.
    User user = getGeneralUser();
    Mockito.when(projectParameterService.queryProjectParameterByCode(Mockito.any(), Mockito.anyLong(),
            Mockito.anyLong())).thenReturn(getSuccessResult());
    Result response = projectParameterController.queryProjectParameterByCode(user, 1, 1);
    Assertions.assertEquals(Status.SUCCESS.getCode(), response.getCode());
}
/** The HTTP method of this request; always {@code "GET"}. */
@Override
public String method() {
    return "GET";
}
// The HTTP verb is fixed to GET; parameters set on the request are retrievable
// and unknown keys yield null.
@Test
public void method() {
    assertThat(underTest.method()).isEqualTo("GET");
    underTest.setParam("foo", "bar");
    assertThat(underTest.param("foo")).isEqualTo("bar");
    assertThat(underTest.param("unknown")).isNull();
}
/**
 * Copies {@code source} to {@code target}, choosing the copy strategy by size:
 * a plain single-request copy below the configured threshold, a multipart
 * (large file) copy at or above it.
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status,
                 final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    final boolean belowThreshold = !new B2ThresholdUploadService(session, fileid, threshold).threshold(status);
    if (belowThreshold) {
        return new B2CopyFeature(session, fileid).copy(source, target, status, callback, listener);
    }
    return new B2LargeCopyFeature(session, fileid).copy(source, target, status, callback, listener);
}
// Uploads ~6MB, copies it with a 5MB threshold (forcing the multipart copy
// path), then verifies the copy exists with a new version id and identical
// content before cleaning up both files.
@Test
public void testCopyFileSizeGreaterPartSize() throws Exception {
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path container = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final String name = new AlphanumericRandomStringService().random();
    final byte[] content = RandomUtils.nextBytes(6 * 1000 * 1000);
    final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
    final OutputStream out = new B2WriteFeature(session, fileid).write(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    new StreamCopier(new TransferStatus(), new TransferStatus().withLength(content.length)).transfer(new ByteArrayInputStream(content), out);
    out.close();
    assertTrue(new B2FindFeature(session, fileid).find(test));
    final B2ThresholdCopyFeature feature = new B2ThresholdCopyFeature(session, fileid, 5 * 1000L * 1000L);
    final Path copy = feature.copy(test, new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withLength(content.length), new DisabledConnectionCallback(), new DisabledStreamListener());
    assertNotEquals(test.attributes().getVersionId(), copy.attributes().getVersionId());
    assertTrue(new B2FindFeature(session, fileid).find(new Path(container, name, EnumSet.of(Path.Type.file))));
    assertTrue(new B2FindFeature(session, fileid).find(copy));
    final byte[] compare = new byte[content.length];
    final InputStream stream = new B2ReadFeature(session, fileid).read(copy, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new B2DeleteFeature(session, fileid).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Resumes provisioning of hosts stuck in state {@code provisioned}: asks the
 * host provisioner for each host's IP configuration and stores it. Failure
 * handling per exception type: transient/invalid-state and DNS errors are
 * retried next run, throttling stops the whole pass, fatal errors (after a
 * 30s grace period) park the host recursively and count as failures.
 *
 * @return success-factor deviation over the candidate hosts
 */
@Override
protected double maintain() {
    NodeList allNodes;
    // Host and child nodes are written in separate transactions, but both are written while holding the
    // unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
    // of newly provisioned hosts.
    try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
        allNodes = nodeRepository().nodes().list();
    }
    NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
    int failures = 0;
    for (Node host : hosts) {
        try {
            HostIpConfig hostIpConfig = hostProvisioner.provision(host);
            setIpConfig(host, hostIpConfig);
        } catch (IllegalArgumentException | IllegalStateException e) {
            // Transient or not-yet-consistent state: retry on the next run, not counted as a failure.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " +
                    interval() + ": " + Exceptions.toMessageString(e));
        } catch (ThrottleProvisioningException e) {
            // The provisioner asked us to back off: stop processing remaining hosts this run.
            log.log(Level.INFO, "Failed to provision " + host.hostname() + ", will retry in " +
                    interval() + ": " + e.getMessage());
            break;
        } catch (FatalProvisioningException e) {
            // FatalProvisioningException is thrown if node is not found in the cloud, allow for
            // some time for the state to propagate
            if (host.history().age(clock().instant()).getSeconds() < 30) continue;
            failures++;
            log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
            nodeRepository().nodes().parkRecursively(
                    host.hostname(), Agent.HostResumeProvisioner, true, "Failed by HostResumeProvisioner due to provisioning failure");
        } catch (RuntimeException e) {
            if (e.getCause() instanceof NamingException)
                // DNS has not propagated yet: retry later without counting a failure.
                log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
            else {
                failures++;
                log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
            }
        }
    }
    return asSuccessFactorDeviation(hosts.size(), failures);
}
// While DNS updates fail, maintain() must not write any pool IP addresses to
// the provisioned host; once DNS succeeds, a subsequent pass writes them.
@Test
public void defer_writing_ip_addresses_until_dns_resolves() {
    deployApplication();
    hostProvisioner.with(MockHostProvisioner.Behaviour.failDnsUpdate);
    Supplier<NodeList> provisioning = () -> tester.nodeRepository().nodes().list(Node.State.provisioned).nodeType(NodeType.host);
    assertEquals(1, provisioning.get().size());
    hostResumeProvisioner.maintain();
    assertTrue("No IP addresses written as DNS updates are failing",
               provisioning.get().stream().allMatch(host -> host.ipConfig().pool().ips().isEmpty()));
    hostProvisioner.without(MockHostProvisioner.Behaviour.failDnsUpdate);
    hostResumeProvisioner.maintain();
    assertTrue("IP addresses written as DNS updates are succeeding",
               provisioning.get().stream().noneMatch(host -> host.ipConfig().pool().ips().isEmpty()));
}
/**
 * Reconciles the desired PVCs against the cluster, resizing where needed.
 * Per PVC: missing/unbound PVCs are (re)created, PVCs already resizing are
 * left alone, PVCs waiting for filesystem resize have their pod index recorded
 * for restart, and bound PVCs with a changed requested size are resized.
 *
 * @param kafkaStatus status object that resize problems are reported into
 * @param pvcs        desired PVC definitions
 * @return future completing with the set of pod indexes that need a restart
 *         to finish a filesystem resize
 */
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
    Set<Integer> podIdsToRestart = new HashSet<>();
    List<Future<Void>> futures = new ArrayList<>(pvcs.size());
    for (PersistentVolumeClaim desiredPvc : pvcs) {
        Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
                .compose(currentPvc -> {
                    if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
                        // This branch handles the following conditions:
                        // * The PVC doesn't exist yet, we should create it
                        // * The PVC is not Bound, we should reconcile it
                        return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                                .map((Void) null);
                    } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
                        // The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
                        LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
                        return Future.succeededFuture();
                    } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
                        // The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
                        podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
                        LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName());
                        return Future.succeededFuture();
                    } else {
                        // The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
                        Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
                        Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));
                        if (!currentSize.equals(desiredSize)) {
                            // The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
                            return resizePvc(kafkaStatus, currentPvc, desiredPvc);
                        } else {
                            // size didn't change, just reconcile
                            return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                                    .map((Void) null);
                        }
                    }
                });
        futures.add(perPvcFuture);
    }
    return Future.all(futures)
            .map(podIdsToRestart);
}
// Bound PVCs whose current request carries an unparseable (negative) size must
// make the reconciliation fail with an IllegalArgumentException naming the
// invalid suffix, even though the storage class is resizable.
@Test
public void testVolumesBoundExpandableStorageClassWithInvalidSize(VertxTestContext context) {
    List<PersistentVolumeClaim> pvcs = List.of(
            createPvc("data-pod-0"),
            createPvc("data-pod-1"),
            createPvc("data-pod-2")
    );
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    // Mock the PVC Operator
    PvcOperator mockPvcOps = supplier.pvcOperations;
    when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-")))
            .thenAnswer(invocation -> {
                String pvcName = invocation.getArgument(1);
                PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null);
                if (currentPvc != null) {
                    PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc)
                            .editSpec()
                                .withNewResources()
                                    .withRequests(Map.of("storage", new Quantity("-50000000000200Gi", null)))
                                .endResources()
                            .endSpec()
                            .withNewStatus()
                                .withPhase("Bound")
                                .withCapacity(Map.of("storage", new Quantity("50Gi", null)))
                            .endStatus()
                            .build();
                    return Future.succeededFuture(pvcWithStatus);
                } else {
                    return Future.succeededFuture();
                }
            });
    ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());
    // Mock the StorageClass Operator
    StorageClassOperator mockSco = supplier.storageClassOperations;
    when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(RESIZABLE_STORAGE_CLASS));
    // Reconcile the PVCs
    PvcReconciler reconciler = new PvcReconciler(
            new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
            mockPvcOps,
            mockSco
    );
    Checkpoint async = context.checkpoint();
    reconciler.resizeAndReconcilePvcs(new KafkaStatus(), pvcs)
            .onComplete(res -> {
                assertThat(res.succeeded(), is(false));
                assertThat(res.cause(), is(instanceOf(IllegalArgumentException.class)));
                assertThat(res.cause().getMessage(), is("Invalid memory suffix: -50000000000200Gi"));
                async.flag();
            });
}
/**
 * Attaches the given logical-channels subtree to the wrapped terminal-device
 * model object.
 *
 * @param logicalChannels handler wrapping the logical channels to attach
 * @return this handler, for call chaining
 */
public OpenConfigTerminalDeviceHandler addLogicalChannels(
        OpenConfigLogicalChannelsHandler logicalChannels) {
    modelObject.logicalChannels(logicalChannels.getModelObject());
    return this;
}
// Adding a logical-channels handler must attach an equivalent LogicalChannels
// subtree to the handler's underlying TerminalDevice model object.
@Test
public void testAddLogicalChannels() {
    // test Handler
    OpenConfigTerminalDeviceHandler terminalDevice = new OpenConfigTerminalDeviceHandler();
    // call addLogicalChannels
    OpenConfigLogicalChannelsHandler logicalChannels = new OpenConfigLogicalChannelsHandler(terminalDevice);
    // expected ModelObject
    DefaultTerminalDevice modelObject = new DefaultTerminalDevice();
    DefaultLogicalChannels logicalChan = new DefaultLogicalChannels();
    modelObject.logicalChannels(logicalChan);
    assertEquals("[NG]addLogicalChannels:ModelObject(LogicalChannels added) is not an expected one.\n",
                 modelObject, terminalDevice.getModelObject());
}
/**
 * Reads a double array serialized as a length prefix followed by the elements.
 *
 * @return the decoded array; {@code null} when the length prefix is the
 *         null-array marker, an empty array for non-positive lengths
 * @throws EOFException if the underlying buffer is exhausted mid-read
 */
@Override
@Nullable
public double[] readDoubleArray() throws EOFException {
    final int length = readInt();
    if (length == NULL_ARRAY_LENGTH) {
        return null;
    }
    if (length <= 0) {
        return new double[0];
    }
    final double[] result = new double[length];
    for (int i = 0; i < length; i++) {
        result[i] = readDouble();
    }
    return result;
}
// Decodes three array shapes from one buffer (in both byte orders): the
// null-array marker (-1), a zero-length array, and a single-element array
// whose bits are Double.longBitsToDouble(1).
@Test
public void testReadDoubleArray() throws Exception {
    byte[] bytesBE = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, -1, -1};
    byte[] bytesLE = {0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1};
    in.init((byteOrder == BIG_ENDIAN ? bytesBE : bytesLE), 0);
    in.position(bytesLE.length - 4);
    double[] theNullArray = in.readDoubleArray();
    in.position(0);
    double[] theZeroLengthArray = in.readDoubleArray();
    in.position(4);
    double[] doubles = in.readDoubleArray();
    assertNull(theNullArray);
    assertArrayEquals(new double[0], theZeroLengthArray, 0);
    assertArrayEquals(new double[]{Double.longBitsToDouble(1)}, doubles, 0);
}
/**
 * Total cost of this cluster: the number of nodes multiplied by the per-node
 * resource cost (unit follows {@code NodeResources.cost()}).
 */
public double cost() {
    return nodes * nodeResources.cost();
}
@Test
void testCost() {
    // Two three-node clusters differing only in memory per node; total cost is
    // the sum of both clusters' node costs.
    ClusterResources smallMemory = new ClusterResources(3, 1, new NodeResources(2, 8, 50, 1));
    ClusterResources largeMemory = new ClusterResources(3, 1, new NodeResources(2, 16, 50, 1));
    assertEquals(2.232, smallMemory.cost() + largeMemory.cost(), 0.01);
}
/** Always {@code false}: multiple simultaneously open results are not supported. */
@Override
public boolean supportsMultipleOpenResults() {
    return false;
}
@Test
void assertSupportsMultipleOpenResults() {
    // Multiple open results must be reported as unsupported.
    boolean supported = metaData.supportsMultipleOpenResults();
    assertFalse(supported);
}
/**
 * Derives forward/backward access for this transport mode from a way's tags.
 * Handles, in order: emergency-access service roads (optionally skipped),
 * explicit access restrictions (unless temporally conditioned), barrier edges
 * created from node tags, ferries (with car-specific implied defaults), and
 * finally oneway/roundabout handling for regular ways.
 */
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
    String highwayValue = way.getTag("highway");
    if (skipEmergency && "service".equals(highwayValue) && "emergency_access".equals(way.getTag("service")))
        return;
    // The highest-priority restriction key present on the way decides access.
    int firstIndex = way.getFirstIndex(restrictionKeys);
    String firstValue = firstIndex < 0 ? "" : way.getTag(restrictionKeys.get(firstIndex), "");
    if (restrictedValues.contains(firstValue) && !hasTemporalRestriction(way, firstIndex, restrictionKeys))
        return;
    if (way.hasTag("gh:barrier_edge") && way.hasTag("node_tags")) {
        List<Map<String, Object>> nodeTags = way.getTag("node_tags", null);
        Map<String, Object> firstNodeTags = nodeTags.get(0);
        // a barrier edge has the restriction in both nodes and the tags are the same -> get(0)
        firstValue = getFirstPriorityNodeTag(firstNodeTags, restrictionKeys);
        String barrierValue = firstNodeTags.containsKey("barrier") ? (String) firstNodeTags.get("barrier") : "";
        if (restrictedValues.contains(firstValue) || barriers.contains(barrierValue)
                || "yes".equals(firstNodeTags.get("locked")) && !INTENDED.contains(firstValue))
            return;
    }
    if (FerrySpeedCalculator.isFerry(way)) {
        boolean isCar = restrictionKeys.contains("motorcar");
        if (INTENDED.contains(firstValue)
                // implied default is allowed only if foot and bicycle is not specified:
                || isCar && firstValue.isEmpty() && !way.hasTag("foot") && !way.hasTag("bicycle")
                // if hgv is allowed then smaller trucks and cars are allowed too even if not specified
                || isCar && way.hasTag("hgv", "yes")) {
            accessEnc.setBool(false, edgeId, edgeIntAccess, true);
            accessEnc.setBool(true, edgeId, edgeIntAccess, true);
        }
    } else {
        boolean isRoundabout = roundaboutEnc.getBool(false, edgeId, edgeIntAccess);
        boolean ignoreOneway = "no".equals(way.getFirstValue(ignoreOnewayKeys));
        boolean isBwd = isBackwardOneway(way);
        if (!ignoreOneway && (isBwd || isRoundabout || isForwardOneway(way))) {
            // Oneway (or roundabout): open only the direction of travel.
            accessEnc.setBool(isBwd, edgeId, edgeIntAccess, true);
        } else {
            // Bidirectional way: open both directions.
            accessEnc.setBool(false, edgeId, edgeIntAccess, true);
            accessEnc.setBool(true, edgeId, edgeIntAccess, true);
        }
    }
}
// Barrier-edge handling: a bollard blocks bus access while a gate does not,
// and a parser configured with a custom barrier set (only kissing_gate) must
// ignore bollards entirely.
@Test
public void testBarrier() {
    ReaderWay way = new ReaderWay(1);
    way.setTag("highway", "secondary");
    way.setTag("gh:barrier_edge", true);
    way.setTag("node_tags", Arrays.asList(Map.of("barrier", "bollard"), Map.of()));
    EdgeIntAccess access = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    parser.handleWayTags(edgeId, access, way, null);
    assertFalse(busAccessEnc.getBool(false, edgeId, access));
    way.setTag("node_tags", Arrays.asList(Map.of("barrier", "gate"), Map.of()));
    access = new ArrayEdgeIntAccess(1);
    parser.handleWayTags(edgeId, access, way, null);
    assertTrue(busAccessEnc.getBool(false, edgeId, access));
    // this special mode ignores all barriers except kissing_gate
    BooleanEncodedValue tmpAccessEnc = new SimpleBooleanEncodedValue("tmp_access", true);
    EncodingManager tmpEM = new EncodingManager.Builder().add(tmpAccessEnc).add(Roundabout.create()).build();
    ModeAccessParser tmpParser = new ModeAccessParser(TransportationMode.CAR, tmpAccessEnc, true,
            tmpEM.getBooleanEncodedValue(Roundabout.KEY), Set.of(), Set.of("kissing_gate"));
    way = new ReaderWay(1);
    way.setTag("highway", "secondary");
    way.setTag("gh:barrier_edge", true);
    way.setTag("node_tags", List.of(Map.of("barrier", "bollard"), Map.of()));
    access = new ArrayEdgeIntAccess(1);
    tmpParser.handleWayTags(edgeId, access, way, null);
    assertTrue(tmpAccessEnc.getBool(false, edgeId, access));
}
/**
 * Checks whether the given URL is reachable, using the default timeout.
 *
 * @param url the URL (or host:port) to probe
 * @return true if the target responds within the default timeout
 */
public static boolean checkUrl(final String url) {
    return checkUrl(url, DEFAULT_TIMEOUT);
}
// Spins up a local server socket on an ephemeral port in a background thread,
// then probes it via checkUrl with bare host:port, http and https targets.
// NOTE(review): the test busy-waits on the non-volatile field `port` written
// by another thread — visibility is not guaranteed by the JMM; presumably this
// is tolerated because the test is @Disabled. Verify before re-enabling.
@Test
@Disabled
public void testSocketConnect() {
    Runnable runnable = () -> {
        ServerSocket serverSocket;
        try {
            serverSocket = new ServerSocket(0);
            port = serverSocket.getLocalPort();
            Socket socket = serverSocket.accept();
            socket.close();
        } catch (IOException e) {
            LOG.error(e.getMessage());
        }
    };
    new Thread(runnable).start();
    while (port == -1) {
        Thread.yield();
    }
    assertTrue(UpstreamCheckUtils.checkUrl("127.0.0.1:" + port));
    assertFalse(UpstreamCheckUtils.checkUrl("http://127.0.0.1:" + (port == 0 ? port + 1 : port - 1)));
    assertTrue(UpstreamCheckUtils.checkUrl("http://127.0.0.1:" + port));
    assertTrue(UpstreamCheckUtils.checkUrl("https://shenyu.apache.org"));
}
/** Creates a new, empty {@link Builder}. */
public static Builder newBuilder() {
    return new Builder();
}
// equals() must consider every field: two builds from the same builder are
// equal (but not the same instance), while changing the quality gate, the
// status, or an evaluated condition each breaks equality.
@Test
public void equals_is_based_on_all_fields() {
    EvaluatedQualityGate.Builder builder = this.builder
        .setQualityGate(ONE_CONDITION_QUALITY_GATE)
        .setStatus(Level.ERROR)
        .addEvaluatedCondition(CONDITION_1, EvaluatedCondition.EvaluationStatus.ERROR, "foo");
    EvaluatedQualityGate underTest = builder.build();
    assertThat(underTest)
        .isEqualTo(builder.build())
        .isNotSameAs(builder.build())
        .isNotNull()
        .isNotEqualTo(new Object())
        .isNotEqualTo(builder.setQualityGate(new QualityGate("other_id", QUALITY_GATE_NAME, singleton(CONDITION_1))).build())
        .isNotEqualTo(builder.setQualityGate(ONE_CONDITION_QUALITY_GATE).setStatus(Level.OK).build())
        .isNotEqualTo(newBuilder()
            .setQualityGate(ONE_CONDITION_QUALITY_GATE)
            .setStatus(Level.ERROR)
            .addEvaluatedCondition(CONDITION_1, EvaluatedCondition.EvaluationStatus.OK, "foo")
            .build());
}
/**
 * Materializes this scan as a Spark {@link Batch} over the planned task
 * groups. This scan's hashCode is passed along as the batch identity.
 */
@Override
public Batch toBatch() {
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
// On an unpartitioned table, system-function predicates (years(ts), bucket(id))
// cannot prune anything: both the OR predicate and its negation must still plan
// all 10 input partitions.
@TestTemplate
public void testUnpartitionedOr() throws Exception {
    createUnpartitionedTable(spark, tableName);
    SparkScanBuilder builder = scanBuilder();
    YearsFunction.TimestampToYearsFunction tsToYears = new YearsFunction.TimestampToYearsFunction();
    UserDefinedScalarFunc udf1 = toUDF(tsToYears, expressions(fieldRef("ts")));
    Predicate predicate1 = new Predicate("=", expressions(udf1, intLit(2017 - 1970)));
    BucketFunction.BucketLong bucketLong = new BucketFunction.BucketLong(DataTypes.LongType);
    UserDefinedScalarFunc udf = toUDF(bucketLong, expressions(intLit(5), fieldRef("id")));
    Predicate predicate2 = new Predicate(">=", expressions(udf, intLit(2)));
    Predicate predicate = new Or(predicate1, predicate2);
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
    // NOT (years(ts) = 47 OR bucket(id, 5) >= 2)
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
/**
 * Encodes a single data object into its URI string form, applying the given
 * escaping rules for the target URI component.
 */
static String encodeElement(Object obj, URLEscaper.Escaping escaping, UriComponent.Type componentType) {
    final StringBuilder encoded = new StringBuilder();
    encodeDataObject(obj, escaping, componentType, encoded);
    return encoded.toString();
}
// Data-driven check of encodeElement under the three escaping modes: none,
// path-segment escaping, and query-parameter escaping.
@Test(dataProvider = "encoding")
public void testEncoding(Object obj, String expectedNoEsc, String expectedPathSegEsc, String expectedQueryParamEsc) {
    String actualNoEsc = URIParamUtils.encodeElement(obj, NO_ESCAPING, null);
    Assert.assertEquals(actualNoEsc, expectedNoEsc);
    String actualPathSegEsc = URIParamUtils.encodeElement(obj, URL_ESCAPING, UriComponent.Type.PATH_SEGMENT);
    Assert.assertEquals(actualPathSegEsc, expectedPathSegEsc);
    String actualQueryParamEsc = URIParamUtils.encodeElement(obj, URL_ESCAPING, UriComponent.Type.QUERY_PARAM);
    Assert.assertEquals(actualQueryParamEsc, expectedQueryParamEsc);
}
public WorkflowInstance createWorkflowInstance( Workflow workflowDef, Long internalId, long workflowVersionId, RunProperties runProperties, RunRequest runRequest) { WorkflowInstance instance = new WorkflowInstance(); instance.setWorkflowId(workflowDef.getId()); instance.setInternalId(internalId); instance.setWorkflowVersionId(workflowVersionId); // latest workflow instance id is unknown, update it later. instance.setWorkflowInstanceId(Constants.LATEST_ONE); // set correlation id if request contains it, otherwise, update it later inside DAO instance.setCorrelationId(runRequest.getCorrelationId()); instance.setRunProperties(runProperties); // it includes runtime params and tags. Its dag is versioned dag. Workflow workflow = overrideWorkflowConfig(workflowDef, runRequest); instance.setRuntimeWorkflow(workflow); // update newly created workflow instance updateWorkflowInstance(instance, runRequest); return instance; }
// The run request's instanceStepConcurrency (10) must override the workflow
// definition's value (20) in the runtime workflow of the created instance.
@Test
public void testCreateWorkflowInstanceWithInstanceStepConcurrencyOverride() {
    RunRequest request = RunRequest.builder()
        .initiator(new ManualInitiator())
        .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
        .instanceStepConcurrency(10L)
        .build();
    Workflow workflow = definition.getWorkflow();
    workflow = workflow.toBuilder().instanceStepConcurrency(20L).build();
    WorkflowInstance instance = workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
    assertEquals(workflow.getId(), instance.getWorkflowId());
    assertEquals(WorkflowInstance.Status.CREATED, instance.getStatus());
    assertNotNull(instance.getParams());
    assertNotNull(instance.getWorkflowUuid());
    assertEquals(10L, instance.getRuntimeWorkflow().getInstanceStepConcurrency().longValue());
    assertEquals(12345L, instance.getInternalId().longValue());
    Mockito.verify(paramsManager, Mockito.times(1)).generateMergedWorkflowParams(any(), any());
}
/**
 * REST endpoint: creates a pipeline processing rule from a rule-builder DTO.
 * The DTO is validated (fail-fast; validation errors become 400 Bad Request),
 * converted to rule source, persisted via the rule resource's parser path, and
 * returned with the stored rule's id filled in.
 */
@ApiOperation(value = "Create a processing rule from rule builder", notes = "")
@POST
@RequiresPermissions(PipelineRestPermissions.PIPELINE_RULE_CREATE)
@AuditEvent(type = PipelineProcessorAuditEventTypes.RULE_CREATE)
public RuleBuilderDto createFromBuilder(@ApiParam(name = "rule", required = true) @NotNull RuleBuilderDto ruleBuilderDto) {
    try {
        validatorService.validateAndFailFast(ruleBuilderDto);
    } catch (IllegalArgumentException exception) {
        throw new BadRequestException(exception.getMessage());
    }
    RuleSource ruleSource = toRuleSource(ruleBuilderDto, false);
    final RuleSource stored = ruleResource.createFromParser(ruleSource);
    return ruleBuilderDto.toBuilder()
            .id(stored.id())
            .build();
}
// createFromBuilder must delegate persistence to the rule resource's parser
// path and return a DTO carrying the id assigned by the store.
@Test
public void ruleParsedAndStoredByRuleResource() {
    when(ruleBuilderService.generateRuleSource(any(), any(RuleBuilder.class), anyBoolean()))
            .thenReturn("rulesource");
    when(ruleResource.createFromParser(any())).thenReturn(RuleSource.builder().id("new_id").source("rulesource").build());
    RuleBuilder ruleBuilder = RuleBuilder.builder().build();
    when(ruleBuilderService.generateTitles(any())).thenReturn(ruleBuilder);
    RuleBuilderDto toSave = RuleBuilderDto.builder()
            .title("title")
            .ruleBuilder(ruleBuilder)
            .build();
    final RuleBuilderDto saved = ruleBuilderResource.createFromBuilder(toSave);
    assertThat(saved.id()).isEqualTo("new_id");
    verify(ruleResource).createFromParser(any());
}
/**
 * Computes the current health of this search node.
 *
 * <p>Health is GREEN when Elasticsearch is reported operational by the cluster app
 * state, RED (with an explanatory cause) otherwise. Node details are always attached.
 *
 * @return a freshly built {@link NodeHealth} snapshot
 */
@Override
public NodeHealth get() {
    final NodeHealth.Builder health = NodeHealth.newNodeHealthBuilder();
    final boolean esOperational = clusterAppState.isOperational(ProcessId.ELASTICSEARCH, true);
    if (esOperational) {
        health.setStatus(NodeHealth.Status.GREEN);
    } else {
        health.setStatus(NodeHealth.Status.RED).addCause("Elasticsearch is not operational");
    }
    health.setDetails(nodeDetails);
    return health.build();
}
// Verifies that the "startedAt" timestamp in the node details is captured once at
// construction time and does not change on subsequent get() calls, even after the
// clock is re-stubbed.
@Test public void get_returns_started_from_System2_now_at_constructor_time() { Properties properties = new Properties(); long now = setRequiredPropertiesAndMocks(properties); SearchNodeHealthProvider underTest = new SearchNodeHealthProvider(new Props(properties), clusterAppState, networkUtils, clock); NodeHealth nodeHealth = underTest.get(); assertThat(nodeHealth.getDetails().getStartedAt()).isEqualTo(now); // change now when(clock.now()).thenReturn(now); NodeHealth newNodeHealth = underTest.get(); assertThat(newNodeHealth.getDetails().getStartedAt()).isEqualTo(now); }
// Sets the number of asynchronous backups for this queue. The value is validated
// against the current synchronous backup count (combined limits are enforced by
// checkAsyncBackupCount, which throws on invalid combinations).
// Returns this config for fluent chaining.
public QueueConfig setAsyncBackupCount(int asyncBackupCount) { this.asyncBackupCount = checkAsyncBackupCount(backupCount, asyncBackupCount); return this; }
// Smoke test: zero async backups is a valid configuration and must not throw.
@Test public void setAsyncBackupCount_whenItsZero() { queueConfig.setAsyncBackupCount(0); }
/**
 * Runs aggregate analysis over the final projection of a grouped query.
 *
 * @param analysis        the query analysis; must contain a GROUP BY clause
 * @param finalProjection the select expressions of the final projection
 * @return the aggregate analysis result produced by the internal analyzer
 * @throws IllegalArgumentException if the analysis has no GROUP BY (not an aggregate query)
 */
public AggregateAnalysisResult analyze(
    final ImmutableAnalysis analysis,
    final List<SelectExpression> finalProjection
) {
  // Guard: aggregate analysis is only defined for grouped queries.
  if (!analysis.getGroupBy().isPresent()) {
    throw new IllegalArgumentException("Not an aggregate query");
  }

  final AggAnalyzer collector = new AggAnalyzer(analysis, functionRegistry);
  collector.process(finalProjection);
  return collector.result();
}
// Verifies that columns referenced only in the GROUP BY clause are still reported as
// required columns by the aggregate analysis result.
@Test public void shouldCaptureGroupByNonAggregateFunctionArgumentsAsRequired() { // Given: givenGroupByExpressions(GROUP_BY_0, GROUP_BY_1); // When: final AggregateAnalysisResult result = analyzer.analyze(analysis, selects); // Then: assertThat(result.getRequiredColumns(), hasItems(GROUP_BY_0, GROUP_BY_1)); }
// Rebuilds the full set of suggestion dictionaries for the given keyboard layouts.
//
// Fast path: if the hash of the requested builders matches the current setup, no
// dictionaries are reloaded — the listener is only replayed (started then done) for the
// already-loaded main/user/contacts dictionaries so callers observe a consistent lifecycle.
//
// Slow path: the previous setup is closed, then for each builder this loads (in the
// background, tracked via mDictionaryDisposables): the main dictionary, the per-locale
// user dictionary (if enabled), quick-fix auto-text + abbreviations (if enabled; when
// mQuickFixesSecondDisabled is set, only for the first/current layout), initial
// suggestions, and a single auto-dictionary (only one exists because the typed word's
// language is unknown — it ends up bound to the LAST builder's language).
// Finally, a real contacts dictionary is created lazily (only if enabled and still the
// NullDictionary placeholder) and loaded with the listener delegate wired up.
//
// NOTE(review): exceptions while creating a main dictionary are logged and skipped,
// but the user/abbr/auto dictionaries for that builder are still set up — presumably
// intentional best-effort behavior; confirm.
public void setupSuggestionsForKeyboard( @NonNull List<DictionaryAddOnAndBuilder> dictionaryBuilders, @NonNull DictionaryBackgroundLoader.Listener cb) { if (BuildConfig.TESTING_BUILD) { Logger.d(TAG, "setupSuggestionsFor %d dictionaries", dictionaryBuilders.size()); for (DictionaryAddOnAndBuilder dictionaryBuilder : dictionaryBuilders) { Logger.d( TAG, " * dictionary %s (%s)", dictionaryBuilder.getId(), dictionaryBuilder.getLanguage()); } } final int newSetupHashCode = calculateHashCodeForBuilders(dictionaryBuilders); if (newSetupHashCode == mCurrentSetupHashCode) { // no need to load, since we have all the same dictionaries, // but, we do need to notify the dictionary loaded listeners. final List<Dictionary> dictionariesToSimulateLoad = new ArrayList<>(mMainDictionary.size() + mUserDictionary.size() + 1 /*for contacts*/); dictionariesToSimulateLoad.addAll(mMainDictionary); dictionariesToSimulateLoad.addAll(mUserDictionary); if (mContactsDictionaryEnabled) dictionariesToSimulateLoad.add(mContactsDictionary); for (Dictionary dictionary : dictionariesToSimulateLoad) { cb.onDictionaryLoadingStarted(dictionary); } for (Dictionary dictionary : dictionariesToSimulateLoad) { cb.onDictionaryLoadingDone(dictionary); } return; } close(); mCurrentSetupHashCode = newSetupHashCode; final CompositeDisposable disposablesHolder = mDictionaryDisposables; for (int i = 0; i < dictionaryBuilders.size(); i++) { DictionaryAddOnAndBuilder dictionaryBuilder = dictionaryBuilders.get(i); try { Logger.d( TAG, " Creating dictionary %s (%s)...", dictionaryBuilder.getId(), dictionaryBuilder.getLanguage()); final Dictionary dictionary = dictionaryBuilder.createDictionary(); mMainDictionary.add(dictionary); Logger.d( TAG, " Loading dictionary %s (%s)...", dictionaryBuilder.getId(), dictionaryBuilder.getLanguage()); disposablesHolder.add( DictionaryBackgroundLoader.loadDictionaryInBackground(cb, dictionary)); } catch (Exception e) { Logger.e(TAG, e, "Failed to create dictionary %s", 
dictionaryBuilder.getId()); } if (mUserDictionaryEnabled) { final UserDictionary userDictionary = createUserDictionaryForLocale(dictionaryBuilder.getLanguage()); mUserDictionary.add(userDictionary); Logger.d(TAG, " Loading user dictionary for %s...", dictionaryBuilder.getLanguage()); disposablesHolder.add( DictionaryBackgroundLoader.loadDictionaryInBackground(cb, userDictionary)); mUserNextWordDictionary.add(userDictionary.getUserNextWordGetter()); } else { Logger.d(TAG, " User does not want user dictionary, skipping..."); } // if mQuickFixesEnabled and mQuickFixesSecondDisabled are true // it activates autotext only to the current keyboard layout language if (mQuickFixesEnabled && (i == 0 || !mQuickFixesSecondDisabled)) { final AutoText autoText = dictionaryBuilder.createAutoText(); if (autoText != null) { mQuickFixesAutoText.add(autoText); } final AbbreviationsDictionary abbreviationsDictionary = new AbbreviationsDictionary(mContext, dictionaryBuilder.getLanguage()); mAbbreviationDictionary.add(abbreviationsDictionary); Logger.d(TAG, " Loading abbr dictionary for %s...", dictionaryBuilder.getLanguage()); disposablesHolder.add( DictionaryBackgroundLoader.loadDictionaryInBackground(abbreviationsDictionary)); } mInitialSuggestionsList.addAll(dictionaryBuilder.createInitialSuggestions()); // only one auto-dictionary. There is no way to know to which language the typed word // belongs. 
mAutoDictionary = new AutoDictionary(mContext, dictionaryBuilder.getLanguage()); Logger.d(TAG, " Loading auto dictionary for %s...", dictionaryBuilder.getLanguage()); disposablesHolder.add(DictionaryBackgroundLoader.loadDictionaryInBackground(mAutoDictionary)); } if (mContactsDictionaryEnabled && mContactsDictionary == NullDictionary) { mContactsDictionaryListener.mDelegate = cb; final ContactsDictionary realContactsDictionary = createRealContactsDictionary(); mContactsDictionary = realContactsDictionary; mContactsNextWordDictionary = realContactsDictionary; disposablesHolder.add( DictionaryBackgroundLoader.loadDictionaryInBackground( mContactsDictionaryListener, mContactsDictionary)); } }
// Verifies that the loading listener is forwarded to every dictionary (main, user,
// contacts) and that lifecycle callbacks arrive in order: all "started" callbacks
// first, then all "done" callbacks, with no extra interactions.
@Test public void testPassesWordsLoadedListenerToDictionaries() throws Exception { mSuggestionsProvider.setupSuggestionsForKeyboard(mFakeBuilders, mMockListener); TestRxSchedulers.drainAllTasks(); Assert.assertNotNull(mFakeContactsDictionary); final InOrder inOrder = Mockito.inOrder(mMockListener); inOrder.verify(mMockListener).onDictionaryLoadingStarted(mFakeBuilder.mSpiedDictionary); inOrder.verify(mMockListener).onDictionaryLoadingStarted(mTestUserDictionary); inOrder.verify(mMockListener).onDictionaryLoadingStarted(mFakeContactsDictionary); inOrder.verify(mMockListener).onDictionaryLoadingDone(mFakeBuilder.mSpiedDictionary); inOrder.verify(mMockListener).onDictionaryLoadingDone(mTestUserDictionary); inOrder.verify(mMockListener).onDictionaryLoadingDone(mFakeContactsDictionary); inOrder.verifyNoMoreInteractions(); }
public static String compareMd5ResultString(List<String> changedGroupKeys) throws IOException { if (null == changedGroupKeys) { return ""; } StringBuilder sb = new StringBuilder(); for (String groupKey : changedGroupKeys) { String[] dataIdGroupId = GroupKey.parseKey(groupKey); sb.append(dataIdGroupId[0]); sb.append(WORD_SEPARATOR); sb.append(dataIdGroupId[1]); // if have tenant, then set it if (dataIdGroupId.length == SIZE_4) { if (StringUtil.isNotBlank(dataIdGroupId[2])) { sb.append(WORD_SEPARATOR); sb.append(dataIdGroupId[2]); } } sb.append(LINE_SEPARATOR); } return URLEncoder.encode(sb.toString(), "UTF-8"); }
// Verifies that an empty change list serializes (and URL-encodes) to the empty string.
@Test public void compareMd5ResultStringEmptyTest() { String key = null; try { key = Md5ConfigUtil.compareMd5ResultString(new ArrayList<>()); } catch (IOException ignored) { } Assert.isTrue(Objects.equals(StringUtil.EMPTY, key)); }
/**
 * Builds an {@code AlterSourceCommand} from an ALTER STREAM/TABLE statement.
 *
 * <p>Each ADD COLUMN alter option is turned into a value-namespace {@link Column}.
 *
 * @param statement the parsed ALTER statement
 * @return the executable alter command
 * @throws KsqlException if the target is a read-only source stream/table, on which
 *         ALTER operations are not supported
 */
public AlterSourceCommand create(final AlterSource statement) {
  final String dataSourceType = statement.getDataSourceType().getKsqlType();
  final DataSource dataSource = metaStore.getSource(statement.getName());

  // ALTER is rejected on read-only sources (CREATE SOURCE STREAM/TABLE).
  if (dataSource != null && dataSource.isSource()) {
    final String typeLower = dataSourceType.toLowerCase();
    throw new KsqlException(String.format(
        "Cannot alter %s '%s': ALTER operations are not supported on source %s.",
        typeLower,
        statement.getName().text(),
        typeLower + "s"));
  }

  final List<Column> newColumns = statement.getAlterOptions().stream()
      .map(option -> Column.of(
          ColumnName.of(option.getColumnName()),
          option.getType().getSqlType(),
          Namespace.VALUE,
          0))
      .collect(Collectors.toList());

  return new AlterSourceCommand(statement.getName(), dataSourceType, newColumns);
}
// Verifies that attempting to ALTER a read-only source stream fails with a
// KsqlException carrying the expected "not supported on source streams" message.
@Test public void shouldThrowInAlterOnSourceStream() { // Given: final AlterSource alterSource = new AlterSource(STREAM_NAME, DataSourceType.KSTREAM, NEW_COLUMNS); when(ksqlStream.isSource()).thenReturn(true); // When: final Exception e = assertThrows( KsqlException.class, () -> alterSourceFactory.create(alterSource)); // Then: assertThat(e.getMessage(), containsString( "Cannot alter stream 'streamname': ALTER operations are not supported on " + "source streams.")); }
/**
 * Stops tracking the given (jobId, blobKey) entry, updating the per-job key index and
 * subtracting the entry's cached size from the running total.
 *
 * <p>Fix: the previous implementation used {@code computeIfAbsent(...).remove(...)},
 * which allocated and permanently inserted an empty {@code HashSet} into
 * {@code blobKeyByJob} whenever an untracked job id was passed. We now only touch the
 * set if the job is actually known.
 *
 * @param key the (jobId, blobKey) pair to untrack; neither the tuple nor its fields may be null
 */
public void untrack(Tuple2<JobID, BlobKey> key) {
    checkNotNull(key);
    checkNotNull(key.f0);
    checkNotNull(key.f1);

    synchronized (lock) {
        // Only mutate the per-job index if the job was ever tracked; avoids leaking
        // empty sets into the map for unknown job ids.
        final Set<BlobKey> blobKeys = blobKeyByJob.get(key.f0);
        if (blobKeys != null) {
            blobKeys.remove(key.f1);
        }

        Long size = caches.remove(key);
        if (size != null) {
            checkState(size >= 0);
            total -= size;
        }
    }
}
// Verifies that untracking removes the entry everywhere: size lookup returns null,
// the per-job key set becomes empty, and the limit check no longer reports it.
@Test void testUntrack() { assertThat(tracker.checkLimit(3L)).hasSize(1); tracker.untrack(Tuple2.of(jobId, blobKey)); assertThat(tracker.getSize(jobId, blobKey)).isNull(); assertThat(tracker.getBlobKeysByJobId(jobId)).isEmpty(); assertThat(tracker.checkLimit(3L)).isEmpty(); }
// Repeatedly chains each transformation with its input where possible, producing a
// smaller, chained transformation list.
//
// Algorithm: BFS over a work queue seeded with the given transformations. For each
// not-yet-processed transformation, chainWithInputIfPossible may fuse it with its input;
// the fused result replaces the originals in the ordered result set (LinkedHashSet
// preserves insertion order), the replaced originals are marked processed, and both the
// new transformation and its inputs are re-queued so further chaining can occur.
// NOTE: alreadyTransformed is an identity set — deduplication is by object identity,
// not equals().
public static List<Transformation<?>> optimize(List<Transformation<?>> transformations) { final Map<Transformation<?>, Set<Transformation<?>>> outputMap = buildOutputMap(transformations); final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>(); final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet(); final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations); while (!toTransformQueue.isEmpty()) { final Transformation<?> transformation = toTransformQueue.poll(); if (!alreadyTransformed.contains(transformation)) { alreadyTransformed.add(transformation); final ChainInfo chainInfo = chainWithInputIfPossible(transformation, outputMap); chainedTransformations.add(chainInfo.newTransformation); chainedTransformations.removeAll(chainInfo.oldTransformations); alreadyTransformed.addAll(chainInfo.oldTransformations); // Add the chained transformation and its inputs to the to-optimize list toTransformQueue.add(chainInfo.newTransformation); toTransformQueue.addAll(chainInfo.newTransformation.getInputs()); } } return new ArrayList<>(chainedTransformations); }
// Builds a chain of three Python operators (keyedProcess -> process -> process) on a
// single source and verifies the optimizer fuses them into one chained transformation:
// two transformations remain (source + chained op), input/output types are preserved,
// and the chained operator wraps the three python functions in execution order f3,f2,f1.
@Test void testSingleTransformation() { ExternalPythonKeyedProcessOperator<?> keyedProcessOperator = createKeyedProcessOperator( "f1", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING()); ExternalPythonProcessOperator<?, ?> processOperator1 = createProcessOperator("f2", Types.STRING(), Types.LONG()); ExternalPythonProcessOperator<?, ?> processOperator2 = createProcessOperator("f3", Types.LONG(), Types.INT()); Transformation<?> sourceTransformation = mock(SourceTransformation.class); OneInputTransformation<?, ?> keyedProcessTransformation = new OneInputTransformation( sourceTransformation, "keyedProcess", keyedProcessOperator, keyedProcessOperator.getProducedType(), 2); Transformation<?> processTransformation1 = new OneInputTransformation( keyedProcessTransformation, "process", processOperator1, processOperator1.getProducedType(), 2); Transformation<?> processTransformation2 = new OneInputTransformation( processTransformation1, "process", processOperator2, processOperator2.getProducedType(), 2); List<Transformation<?>> transformations = new ArrayList<>(); transformations.add(processTransformation2); List<Transformation<?>> optimized = PythonOperatorChainingOptimizer.optimize(transformations); assertThat(optimized).hasSize(2); OneInputTransformation<?, ?> chainedTransformation = (OneInputTransformation<?, ?>) optimized.get(0); assertThat(sourceTransformation.getOutputType()) .isEqualTo(chainedTransformation.getInputType()); assertThat(processOperator2.getProducedType()) .isEqualTo(chainedTransformation.getOutputType()); OneInputStreamOperator<?, ?> chainedOperator = chainedTransformation.getOperator(); assertThat(chainedOperator).isInstanceOf(ExternalPythonKeyedProcessOperator.class); validateChainedPythonFunctions( ((ExternalPythonKeyedProcessOperator<?>) chainedOperator).getPythonFunctionInfo(), "f3", "f2", "f1"); }
/**
 * Parses the stream with SAX, wrapping the handler in an {@link OfflineContentHandler}
 * to block external-entity resolution.
 *
 * <p>If the caller supplied a {@link SAXParser} via the parse context it is used as-is
 * (and NOT returned to the pool); otherwise a parser is borrowed from the internal pool
 * and always released afterwards.
 *
 * @param is             the XML input stream
 * @param contentHandler receiver of SAX events
 * @param context        may carry a caller-provided SAXParser
 */
public static void parseSAX(InputStream is, ContentHandler contentHandler, ParseContext context) throws TikaException, IOException, SAXException {
    PoolSAXParser pooled = null;
    SAXParser parser = context.get(SAXParser.class);
    if (parser == null) {
        // No caller-supplied parser: borrow one from the pool for the duration.
        pooled = acquireSAXParser();
        parser = pooled.getSAXParser();
    }
    try {
        parser.parse(is, new OfflineContentHandler(contentHandler));
    } finally {
        // Only release what we borrowed; caller-owned parsers stay with the caller.
        if (pooled != null) {
            releaseParser(pooled);
        }
    }
}
// Security regression test: parsing a document whose DOCTYPE points at an external DTD
// must NOT trigger a network fetch. A ConnectException would mean the offline handler
// failed to suppress external entity resolution.
@Test public void testExternalDTD() throws Exception { String xml = "<!DOCTYPE foo SYSTEM \"http://127.234.172.38:7845/bar\"><foo/>"; try { XMLReaderUtils.parseSAX(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)), new ToTextContentHandler(), new ParseContext()); } catch (ConnectException e) { fail("Parser tried to access the external DTD:" + e); } }
// Parses the expression and returns the set of variables the expression *assigns*
// (value == true in the visitor's map) that are NOT in the caller's whitelist —
// i.e. the disallowed writes.
//
// NOTE(review): selParser is a shared, re-initialized parser instance, so this method
// presumably is not thread-safe — confirm with callers.
// Throws ParseException (via selParser) on malformed expressions; checkExprLength
// rejects over-long input up front.
public Set<String> validate(String expr, Set<String> whitelistVars) throws Exception { checkExprLength(expr); selParser.ReInit(new ByteArrayInputStream(expr.getBytes())); ASTExecute n = selParser.Execute(); Map<String, Boolean> vars = new HashMap<>(); n.jjtAccept(validator, vars); Set<String> res = new HashSet<>(); for (Map.Entry<String, Boolean> entry : vars.entrySet()) { if (entry.getValue() && !whitelistVars.contains(entry.getKey())) { res.add(entry.getKey()); } } return res; }
// Verifies that a syntactically invalid expression is rejected with a ParseException.
@Test(expected = ParseException.class) public void testInvalidate() throws Exception { t1.validate("invalid();", new HashSet<>()); }
// Returns a future for all values stored under one multimap key.
//
// Builds a MULTIMAP_SINGLE_ENTRY state tag (tag + state family + the specific multimap
// key) and wraps the raw values future into a paging iterable so results larger than
// one response are fetched via continuation requests transparently.
public <T> Future<Iterable<T>> multimapFetchSingleEntryFuture( ByteString encodedKey, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) { StateTag<ByteString> stateTag = StateTag.<ByteString>of(Kind.MULTIMAP_SINGLE_ENTRY, encodedTag, stateFamily) .toBuilder() .setMultimapKey(encodedKey) .build(); return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder)); }
// Verifies paginated multimap reads: two entry futures are batched into ONE windmill
// request; the first response returns a continuation position (800) for entry 1, which
// triggers exactly one follow-up request (with continuation byte limits and the request
// position) for that entry only. Final iterables must merge the paginated values
// (5,6 + 7,8) and the complete entry (15,16), with exactly two windmill round trips.
@Test public void testReadMultimapMultipleEntriesWithPagination() throws Exception { Future<Iterable<Integer>> future1 = underTest.multimapFetchSingleEntryFuture( STATE_MULTIMAP_KEY_1, STATE_KEY_1, STATE_FAMILY, INT_CODER); Future<Iterable<Integer>> future2 = underTest.multimapFetchSingleEntryFuture( STATE_MULTIMAP_KEY_2, STATE_KEY_1, STATE_FAMILY, INT_CODER); Mockito.verifyNoMoreInteractions(mockWindmill); Windmill.KeyedGetDataRequest.Builder expectedRequest1 = Windmill.KeyedGetDataRequest.newBuilder() .setKey(DATA_KEY) .setShardingKey(SHARDING_KEY) .setWorkToken(WORK_TOKEN) .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES) .addMultimapsToFetch( Windmill.TagMultimapFetchRequest.newBuilder() .setTag(STATE_KEY_1) .setStateFamily(STATE_FAMILY) .setFetchEntryNamesOnly(false) .addEntriesToFetch( Windmill.TagMultimapEntry.newBuilder() .setEntryName(STATE_MULTIMAP_KEY_1) .setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES) .build()) .addEntriesToFetch( Windmill.TagMultimapEntry.newBuilder() .setEntryName(STATE_MULTIMAP_KEY_2) .setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES) .build())); Windmill.KeyedGetDataResponse.Builder response1 = Windmill.KeyedGetDataResponse.newBuilder() .setKey(DATA_KEY) .addTagMultimaps( Windmill.TagMultimapFetchResponse.newBuilder() .setTag(STATE_KEY_1) .setStateFamily(STATE_FAMILY) .addEntries( Windmill.TagMultimapEntry.newBuilder() .setEntryName(STATE_MULTIMAP_KEY_1) .addAllValues(Arrays.asList(intData(5), intData(6))) .setContinuationPosition(800)) .addEntries( Windmill.TagMultimapEntry.newBuilder() .setEntryName(STATE_MULTIMAP_KEY_2) .addAllValues(Arrays.asList(intData(15), intData(16))))); Windmill.KeyedGetDataRequest.Builder expectedRequest2 = Windmill.KeyedGetDataRequest.newBuilder() .setKey(DATA_KEY) .setShardingKey(SHARDING_KEY) .setWorkToken(WORK_TOKEN) .setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES) .addMultimapsToFetch( Windmill.TagMultimapFetchRequest.newBuilder() .setTag(STATE_KEY_1) 
.setStateFamily(STATE_FAMILY) .setFetchEntryNamesOnly(false) .addEntriesToFetch( Windmill.TagMultimapEntry.newBuilder() .setEntryName(STATE_MULTIMAP_KEY_1) .setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES) .setRequestPosition(800) .build())); Windmill.KeyedGetDataResponse.Builder response2 = Windmill.KeyedGetDataResponse.newBuilder() .setKey(DATA_KEY) .addTagMultimaps( Windmill.TagMultimapFetchResponse.newBuilder() .setTag(STATE_KEY_1) .setStateFamily(STATE_FAMILY) .addEntries( Windmill.TagMultimapEntry.newBuilder() .setEntryName(STATE_MULTIMAP_KEY_1) .addAllValues(Arrays.asList(intData(7), intData(8))) .setRequestPosition(800))); when(mockWindmill.getStateData(ArgumentMatchers.eq(COMPUTATION), ArgumentMatchers.any())) .thenReturn(response1.build()) .thenReturn(response2.build()); Iterable<Integer> results1 = future1.get(); Iterable<Integer> results2 = future2.get(); assertThat(results1, Matchers.containsInAnyOrder(5, 6, 7, 8)); assertThat(results2, Matchers.containsInAnyOrder(15, 16)); final ArgumentCaptor<Windmill.KeyedGetDataRequest> requestCaptor = ArgumentCaptor.forClass(Windmill.KeyedGetDataRequest.class); Mockito.verify(mockWindmill, times(2)) .getStateData(ArgumentMatchers.eq(COMPUTATION), requestCaptor.capture()); assertMultimapFetchRequestEqual( expectedRequest1.build().getMultimapsToFetch(0), requestCaptor.getAllValues().get(0).getMultimapsToFetch(0)); assertMultimapFetchRequestEqual( expectedRequest2.build().getMultimapsToFetch(0), requestCaptor.getAllValues().get(1).getMultimapsToFetch(0)); Mockito.verifyNoMoreInteractions(mockWindmill); // NOTE: The future will still contain a reference to the underlying reader, thus not calling // assertNoReader(future). }
// Convenience overload: analyzes the statement with the "describe" flag defaulted to false.
public Analysis analyze(Statement statement) { return analyze(statement, false); }
// Verifies that a stored view is resolved relative to its OWN catalog/schema
// (c3.s3), not the session's current catalog/schema.
@Test public void testStoredViewResolution() { // the view must be analyzed relative to its own catalog/schema analyze("SELECT * FROM c3.s3.v3"); }
/**
 * Wraps a {@link Callable} so its execution is tracked by the given observation.
 *
 * <p>The observation is started before the delegate runs and stopped afterwards
 * (whether the call succeeds or throws); any thrown exception is recorded on the
 * observation via {@code error(..)} and rethrown unchanged.
 *
 * @param observation the observation to start/stop around the call
 * @param callable    the delegate to execute
 * @return a decorated callable with identical result/exception behavior
 */
static <T> Callable<T> decorateCallable(Observation observation, Callable<T> callable) {
    return new Callable<T>() {
        @Override
        public T call() throws Exception {
            observation.start();
            try {
                return callable.call();
            } catch (Exception e) {
                // Record the failure on the observation, then propagate unchanged.
                observation.error(e);
                throw e;
            } finally {
                observation.stop();
            }
        }
    };
}
// Happy path: the decorated callable returns the delegate's value, invokes the
// delegate exactly once, and the observation is started and stopped with no error.
@Test public void shouldDecorateCallable() throws Throwable { given(helloWorldService.returnHelloWorldWithException()).willReturn("Hello world"); Callable<String> timedSupplier = Observations .decorateCallable(observation, helloWorldService::returnHelloWorldWithException); String value = timedSupplier.call(); assertThatObservationWasStartedAndFinishedWithoutErrors(); assertThat(value).isEqualTo("Hello world"); then(helloWorldService).should(times(1)).returnHelloWorldWithException(); }
// Issues a synchronous GET to the given path with no body, deserializing the response
// body into the requested type. No per-request properties are passed (empty Optional).
private <T> RestResponse<T> get(final String path, final Class<T> type) { return executeRequestSync(HttpMethod.GET, path, null, r -> deserialize(r.getBody(), type), Optional.empty()); }
// Verifies error handling when the HTTP connection is closed early and close() itself
// throws: the chunk received before the close must still be delivered (one row), and
// the surfaced error message must identify the failed POST to /query.
@Test public void shouldPostQueryRequest_chunkHandler_closeEarlyWithError() { doThrow(new RuntimeException("Error!")).when(httpConnection).close(); ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties, authHeader, HOST, Collections.emptyMap(), RequestOptions.DEFAULT_TIMEOUT); executor.submit(this::expectPostQueryRequestChunkHandler); assertThatEventually(requestStarted::get, is(true)); handlerCaptor.getValue().handle(Buffer.buffer("{\"row\": {\"columns\": [1.0, 12.1]}},\n")); closeConnection.complete(null); assertThatEventually(error::get, notNullValue()); assertThat(error.get().getMessage(), containsString("Error issuing POST to KSQL server. path:/query")); assertThat(rows.size(), is (1)); }
/**
 * Returns the material URL exactly as the user originally entered it,
 * or {@code null} when no URL has been configured.
 */
@Override
public String getUrl() {
    if (url == null) {
        return null;
    }
    return url.originalArgument();
}
// A git material config built without a URL must report null from getUrl().
@Test void shouldReturnNullIfUrlForMaterialNotSpecified() { GitMaterialConfig config = git(); assertNull(config.getUrl()); }
/**
 * Looks up a mount point by its numeric mount id.
 *
 * <p>Performs a linear scan of the mount table under the read lock.
 *
 * @param mountId the mount id to search for
 * @return the matching {@link MountInfo}, or {@code null} if no mount has that id
 */
@Nullable
public MountInfo getMountInfo(long mountId) {
    try (LockResource r = new LockResource(mReadLock)) {
        for (MountInfo mountInfo : mState.getMountTable().values()) {
            if (mountInfo.getMountId() == mountId) {
                return mountInfo;
            }
        }
    }
    return null;
}
// Registers two mounts (ids 2 and 3) and verifies lookup by id returns the matching
// MountInfo, and that an unknown id (4) yields null.
@Test public void getMountInfo() throws Exception { MountInfo info1 = new MountInfo(new AlluxioURI("/mnt/foo"), new AlluxioURI("hdfs://localhost:5678/foo"), 2L, MountContext.defaults().getOptions().build()); MountInfo info2 = new MountInfo(new AlluxioURI("/mnt/bar"), new AlluxioURI("hdfs://localhost:5678/bar"), 3L, MountContext.defaults().getOptions().build()); addMount("/mnt/foo", "hdfs://localhost:5678/foo", 2); addMount("/mnt/bar", "hdfs://localhost:5678/bar", 3); Assert.assertEquals(info1, mMountTable.getMountInfo(info1.getMountId())); Assert.assertEquals(info2, mMountTable.getMountInfo(info2.getMountId())); Assert.assertEquals(null, mMountTable.getMountInfo(4L)); }
// Convenience overload: iterates records matching the exact value, with no
// last-entry cursor (null), delegating to the full three-argument variant.
@Override public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) { return getSqlRecordIteratorBatch(value, descending, null); }
// Verifies that a LESS_OR_EQUAL comparison with descending=true yields records in the
// expected reverse key order.
@Test public void getRecordsUsingExactValueInequalityDescending() { var expectedOrder = List.of(7, 4, 1, 6, 3, 0); var actual = store.getSqlRecordIteratorBatch(Comparison.LESS_OR_EQUAL, 1, true); assertResult(expectedOrder, actual); }
/**
 * CLI command handler for the request-pipelining setting.
 *
 * <p>With no argument, prints the current setting (ON/OFF). With one argument,
 * updates the setting; anything other than ON/OFF (case-insensitive) prints a
 * usage hint and leaves the setting unchanged.
 *
 * @param args     zero or one argument: the new setting
 * @param terminal output destination
 */
@Override
public void execute(final List<String> args, final PrintWriter terminal) {
  CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP);

  if (args.isEmpty()) {
    // Query mode: report the current value.
    terminal.printf("Current %s configuration: %s%n", NAME,
        requestPipeliningSupplier.get() ? "ON" : "OFF");
    return;
  }

  final String requested = args.get(0);
  final String normalized = requested.toUpperCase();
  if ("ON".equals(normalized)) {
    requestPipeliningConsumer.accept(true);
  } else if ("OFF".equals(normalized)) {
    requestPipeliningConsumer.accept(false);
  } else {
    terminal.printf("Invalid %s setting: %s. ", NAME, requested);
    terminal.println("Valid options are 'ON' and 'OFF'.");
    return;
  }
  terminal.println(NAME + " configuration is now " + normalized);
}
// With no arguments and the supplier reporting true, the command must print the
// current configuration as ON.
@Test public void shouldPrintCurrentSettingOfOn() { // Given: when(settingSupplier.get()).thenReturn(true); // When: requestPipeliningCommand.execute(Collections.emptyList(), terminal); // Then: assertThat(out.toString(), containsString(String.format("Current %s configuration: ON", RequestPipeliningCommand.NAME))); }
/**
 * Builds a node name of the form {@code <moduleName>/<providerName>}.
 *
 * @param moduleName   the module part of the name
 * @param providerName the provider part of the name
 * @return the two parts joined with a forward slash
 */
public static String build(String moduleName, String providerName) {
    // %s renders nulls as "null", matching plain string concatenation.
    return String.format("%s/%s", moduleName, providerName);
}
// Verifies the node name is "<module>/<provider>".
@Test public void build() { String moduleName = "my-module"; String providerName = "my-provider-name"; String nodeName = NodeNameBuilder.build(moduleName, providerName); assertEquals(moduleName + "/" + providerName, nodeName); }
/**
 * Computes the Keccak-256 (sha3) digest of hex-encoded input.
 *
 * @param hexInput hex string (with or without 0x prefix, per Numeric's parsing)
 * @return the digest as a 0x-prefixed hex string
 */
public static String sha3(String hexInput) {
    final byte[] digest = sha3(Numeric.hexStringToByteArray(hexInput));
    return Numeric.toHexString(digest);
}
// Known-answer test: sha3/Keccak-256 of the bytes of "hello world" must match the
// expected 32-byte digest (bytes assembled nibble-by-nibble via asByte).
@Test public void testSha3() { byte[] input = new byte[] { asByte(0x6, 0x8), asByte(0x6, 0x5), asByte(0x6, 0xc), asByte(0x6, 0xc), asByte(0x6, 0xf), asByte(0x2, 0x0), asByte(0x7, 0x7), asByte(0x6, 0xf), asByte(0x7, 0x2), asByte(0x6, 0xc), asByte(0x6, 0x4) }; byte[] expected = new byte[] { asByte(0x4, 0x7), asByte(0x1, 0x7), asByte(0x3, 0x2), asByte(0x8, 0x5), asByte(0xa, 0x8), asByte(0xd, 0x7), asByte(0x3, 0x4), asByte(0x1, 0xe), asByte(0x5, 0xe), asByte(0x9, 0x7), asByte(0x2, 0xf), asByte(0xc, 0x6), asByte(0x7, 0x7), asByte(0x2, 0x8), asByte(0x6, 0x3), asByte(0x8, 0x4), asByte(0xf, 0x8), asByte(0x0, 0x2), asByte(0xf, 0x8), asByte(0xe, 0xf), asByte(0x4, 0x2), asByte(0xa, 0x5), asByte(0xe, 0xc), asByte(0x5, 0xf), asByte(0x0, 0x3), asByte(0xb, 0xb), asByte(0xf, 0xa), asByte(0x2, 0x5), asByte(0x4, 0xc), asByte(0xb, 0x0), asByte(0x1, 0xf), asByte(0xa, 0xd) }; byte[] result = Hash.sha3(input); assertArrayEquals(result, (expected)); }
/**
 * Computes the Lee distance between two integer vectors over an alphabet of size q:
 * the sum over components of min(|x[i]-y[i]|, q - |x[i]-y[i]|).
 *
 * @param x first vector
 * @param y second vector, same length as x
 * @return the Lee distance
 * @throws IllegalArgumentException if the vectors differ in length
 */
@Override
public double d(int[] x, int[] y) {
    if (x.length != y.length) {
        throw new IllegalArgumentException(String.format("Arrays have different length: x[%d], y[%d]", x.length, y.length));
    }

    int sum = 0;
    for (int i = 0; i < x.length; i++) {
        // Circular distance on Z_q: take the shorter way around the ring.
        final int diff = Math.abs(x[i] - y[i]);
        sum += Math.min(diff, q - diff);
    }
    return sum;
}
// Known-answer test for Lee distance with q=6:
// per-component min(|x-y|, 6-|x-y|) = 1 + 2 + 0 + 3 = 6.
@Test public void testDistance() { System.out.println("distance"); int[] x = {3, 3, 4, 0}; int[] y = {2, 5, 4, 3}; LeeDistance instance = new LeeDistance(6); assertEquals(6.0, instance.d(x, y), 1E-9); }
// Verifies per-chunk checksums over the remaining bytes of 'data' against 'sums',
// delegating to the native implementation (last arg 'true' = verify mode rather
// than compute mode). Buffer positions are passed through untouched; fileName and
// basePos are used only to build a useful ChecksumException message.
public static void verifyChunkedSums(int bytesPerSum, int checksumType, ByteBuffer sums, ByteBuffer data, String fileName, long basePos) throws ChecksumException { nativeComputeChunkedSums(bytesPerSum, checksumType, sums, sums.position(), data, data.position(), data.remaining(), fileName, basePos, true); }
// Happy path: verifying data against checksums that were computed from that same data
// must not throw.
@Test public void testVerifyChunkedSumsSuccess() throws ChecksumException { allocateDirectByteBuffers(); fillDataAndValidChecksums(); NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id, checksums, data, fileName, BASE_POSITION); }
// Opens (creating if necessary) the RocksDB instance backing this store.
//
// Order matters here: default DB/column-family options are built first, the
// block-based table config (LRU block cache + bloom filter) is attached, then the
// user's RocksDBConfigSetter (if configured via StreamsConfig) is applied so it can
// override the defaults. Statistics must be wired up BEFORE the DB is opened or the
// recorder would miss RocksDB's measurements. Directory creation failures are wrapped
// in ProcessorStateException. On success the store is marked open and its value
// providers are registered with the metrics recorder.
@SuppressWarnings("unchecked") void openDB(final Map<String, Object> configs, final File stateDir) { // initialize the default rocksdb options final DBOptions dbOptions = new DBOptions(); final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions(); userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions); final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache(); cache = new LRUCache(BLOCK_CACHE_SIZE); tableConfig.setBlockCache(cache); tableConfig.setBlockSize(BLOCK_SIZE); filter = new BloomFilter(); tableConfig.setFilterPolicy(filter); userSpecifiedOptions.optimizeFiltersForHits(); userSpecifiedOptions.setTableFormatConfig(tableConfig); userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE); userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE); userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE); userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS); userSpecifiedOptions.setCreateIfMissing(true); userSpecifiedOptions.setErrorIfExists(false); userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL); // this is the recommended way to increase parallelism in RocksDb // note that the current implementation of setIncreaseParallelism affects the number // of compaction threads but not flush threads (the latter remains one). Also, // the parallelism value needs to be at least two because of the code in // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580 // subtracts one from the value passed to determine the number of compaction threads // (this could be a bug in the RocksDB code and their devs have been contacted). 
userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2)); wOptions = new WriteOptions(); wOptions.setDisableWAL(true); fOptions = new FlushOptions(); fOptions.setWaitForFlush(true); final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG); if (configSetterClass != null) { configSetter = Utils.newInstance(configSetterClass); configSetter.setConfig(name, userSpecifiedOptions, configs); } dbDir = new File(new File(stateDir, parentDir), name); try { Files.createDirectories(dbDir.getParentFile().toPath()); Files.createDirectories(dbDir.getAbsoluteFile().toPath()); } catch (final IOException fatal) { throw new ProcessorStateException(fatal); } // Setup statistics before the database is opened, otherwise the statistics are not updated // with the measurements from Rocks DB setupStatistics(configs, dbOptions); openRocksDB(dbOptions, columnFamilyOptions); dbAccessor = new DirectDBAccessor(db, fOptions, wOptions); open = true; addValueProvidersToMetricsRecorder(); }
// At DEBUG recording level, openDB must register value providers with the metrics
// recorder including a non-null statistics object (third notNull() argument).
@Test public void shouldAddValueProvidersWithStatisticsToInjectedMetricsRecorderWhenRecordingLevelDebug() { rocksDBStore = getRocksDBStoreWithRocksDBMetricsRecorder(); context = getProcessorContext(RecordingLevel.DEBUG); rocksDBStore.openDB(context.appConfigs(), context.stateDir()); verify(metricsRecorder).addValueProviders(eq(DB_NAME), notNull(), notNull(), notNull()); }
/**
 * Decodes a {@code Cookie} request header into a list of cookies.
 *
 * <p>Parsing is delegated to {@link HttpCookie#parse(String)}; each parsed cookie is
 * wrapped in a {@code CookieHttpCookieAdapter}.
 *
 * @param header the raw Cookie header value
 * @return an unmodifiable list of decoded cookies
 */
@Override
@NonNull
public List<Cookie> decode(@NonNull String header) {
    final List<HttpCookie> parsed = HttpCookie.parse(header);
    return parsed.stream()
            .<Cookie>map(CookieHttpCookieAdapter::new)
            .toList();
}
// Decodes two representative Set-Cookie-style headers and checks all parsed
// attributes: name/value/path, Domain present vs absent, and the Secure/HttpOnly
// flags, with SameSite never set by parsing.
@Test void testCookieDecoding() { String header = "SID=31d4d96e407aad42; Path=/; Domain=example.com"; ServerCookieDecoder decoder = new DefaultServerCookieDecoder(); List<Cookie> cookies = decoder.decode(header); assertNotNull(cookies); assertEquals(1, cookies.size()); Cookie cookie = cookies.get(0); assertEquals("SID", cookie.getName()); assertEquals("31d4d96e407aad42", cookie.getValue()); assertEquals("/", cookie.getPath()); assertEquals("example.com", cookie.getDomain()); assertFalse(cookie.isHttpOnly()); assertFalse(cookie.isSecure()); assertTrue(cookie.getSameSite().isEmpty()); header = "SID=31d4d96e407aad42; Path=/; Secure; HttpOnly"; cookies = decoder.decode(header); assertNotNull(cookies); assertEquals(1, cookies.size()); cookie = cookies.get(0); assertEquals("SID", cookie.getName()); assertEquals("31d4d96e407aad42", cookie.getValue()); assertEquals("/", cookie.getPath()); assertNull(cookie.getDomain()); assertTrue(cookie.isHttpOnly()); assertTrue(cookie.isSecure()); assertTrue(cookie.getSameSite().isEmpty()); }
// Returns the candidate (suggestions strip) view held by this container.
public CandidateView getCandidateView() { return mCandidateView; }
// The candidate view reference must be stable: adding and removing unrelated child
// views must not change which instance getCandidateView() returns.
@Test public void testGetCandidateView() { final CandidateView originalView = mUnderTest.getCandidateView(); Assert.assertNotNull(originalView); AnyKeyboardView mock2 = Mockito.mock(AnyKeyboardView.class); mUnderTest.addView(mock2); Assert.assertSame(originalView, mUnderTest.getCandidateView()); mUnderTest.removeView(mock2); Assert.assertSame(originalView, mUnderTest.getCandidateView()); }
// Validates Bitbucket Cloud OAuth credentials and workspace access.
//
// Steps: (1) exchange client id/secret for an access token; (2) require the token's
// scopes to include "pullrequest" — otherwise log the missing-permission detail and
// fail; (3) probe the workspace endpoint with the token, converting NotFound /
// IllegalState failures into IllegalArgumentException so callers see a uniform
// validation error.
public void validate(String clientId, String clientSecret, String workspace) { Token token = validateAccessToken(clientId, clientSecret); if (token.getScopes() == null || !token.getScopes().contains("pullrequest")) { LOG.info(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, token.getScopes())); throw new IllegalArgumentException(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION); } try { doGet(token.getAccessToken(), buildUrl("/repositories/" + workspace), r -> null); } catch (NotFoundException | IllegalStateException e) { throw new IllegalArgumentException(e.getMessage()); } }
// Token exchange succeeds (scopes include "pullrequest:write", which contains the
// substring "pullrequest"), but the workspace probe returns 400 with a privilege
// error: the client must surface the API's error message as an
// IllegalArgumentException and log the failed request at INFO.
@Test public void validate_with_insufficient_privileges() { String tokenResponse = "{\"scopes\": \"webhook pullrequest:write\", \"access_token\": \"token\", \"expires_in\": 7200, " + "\"token_type\": \"bearer\", \"state\": \"client_credentials\", \"refresh_token\": \"abc\"}"; server.enqueue(new MockResponse().setBody(tokenResponse).setResponseCode(200).setHeader("Content-Type", JSON_MEDIA_TYPE)); String error = "{\"type\": \"error\", \"error\": {\"message\": \"Your credentials lack one or more required privilege scopes.\", \"detail\": " + "{\"granted\": [\"email\"], \"required\": [\"account\"]}}}\n"; server.enqueue(new MockResponse().setBody(error).setResponseCode(400).setHeader("Content-Type", JSON_MEDIA_TYPE)); assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> underTest.validate("clientId", "clientSecret", "workspace")) .withMessage("Error returned by Bitbucket Cloud: Your credentials lack one or more required privilege scopes."); assertThat(logTester.logs(Level.INFO)).containsExactly(String.format(BBC_FAIL_WITH_RESPONSE, serverURL + "2.0/repositories/workspace", "400", error)); }
// FEEL years-and-months-duration(from, to): returns the Period between two temporal
// values with the days component zeroed (year/month granularity only), wrapped as a
// ComparablePeriod. Null arguments and temporals that cannot be coerced to a
// LocalDate (via getLocalDateFromTemporal) produce FEEL error results instead of
// throwing.
public FEELFnResult<TemporalAmount> invoke(@ParameterName("from") Temporal from, @ParameterName("to") Temporal to) { if ( from == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } if ( to == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "cannot be null")); } final LocalDate fromDate = getLocalDateFromTemporal(from); if (fromDate == null) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "is of type not suitable for years and months function")); } final LocalDate toDate = getLocalDateFromTemporal(to); if (toDate == null) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "is of type not suitable for years and months function")); } return FEELFnResult.ofResult(new ComparablePeriod(Period.between(fromDate, toDate).withDays(0))); }
@Test
void invokeYearMonthLocalDate() {
    // From 2017-06-12 to 2020-04 is exactly 2 years and 9 months (days are dropped).
    final LocalDate from = LocalDate.of(2017, 6, 12);
    final YearMonth to = YearMonth.of(2020, 4);
    FunctionTestUtil.assertResult(yamFunction.invoke(from, to), ComparablePeriod.of(2, 9, 0));
}
// Computes a CRC-32 checksum over [offset, offset + length) of the off-heap memory region
// starting at `address`, by delegating to a pre-resolved method handle
// (UPDATE_BYTE_BUFFER) bound to the JDK-internal CRC32 native implementation.
// NOTE(review): invokeExact requires the call-site signature to match the handle's type
// exactly — do not change argument or return types here without updating the handle.
public int compute(final long address, final int offset, final int length) {
    try {
        return (int)UPDATE_BYTE_BUFFER.invokeExact(address, offset, length);
    } catch (final Throwable t) {
        // rethrowUnchecked re-throws `t`; the return below is unreachable but
        // required to satisfy the compiler's definite-return analysis.
        LangUtil.rethrowUnchecked(t);
        return -1;
    }
}
// Cross-checks the method-handle based checksum against java.util.zip.CRC32 for every
// payload length 1..1024, using a direct buffer with a non-zero start offset so the
// address arithmetic in compute() is actually exercised.
@Test
void compute() {
    final Random random = new Random(-1234); // fixed seed keeps the test deterministic
    final int offset = 3;
    final ByteBuffer buffer = ByteBuffer.allocateDirect(1024 + offset);
    final long address = address(buffer);
    for (int i = 1; i <= 1024; i++) {
        final int length = i;
        final byte[] data = new byte[length];
        random.nextBytes(data);
        // Write the payload at `offset`, then position the buffer over exactly that payload.
        buffer.clear().position(offset);
        buffer.put(data);
        buffer.flip().position(offset);
        // Reference checksum computed by the JDK implementation over the same bytes.
        final CRC32 crc32 = new CRC32();
        crc32.update(buffer);
        final int checksum = (int)crc32.getValue();
        assertEquals(checksum, INSTANCE.compute(address, offset, length), () -> "Failed on length: " + length);
    }
}
/**
 * Converts a YAML proxy data source definition into its runtime configuration,
 * translating the connection-related and pool-related sections independently.
 *
 * @param yamlConfig YAML proxy data source configuration to convert
 * @return the assembled data source configuration
 */
public DataSourceConfiguration swap(final YamlProxyDataSourceConfiguration yamlConfig) {
    return new DataSourceConfiguration(swapConnectionConfiguration(yamlConfig), swapPoolConfiguration(yamlConfig));
}
@Test
void assertSwap() throws IOException {
    // Load the fixture configuration and pick the data source under test.
    YamlProxyConfiguration yamlProxyConfig = ProxyConfigurationLoader.load("/conf/swap");
    YamlProxyDataSourceConfiguration dataSourceConfig =
            yamlProxyConfig.getDatabaseConfigurations().get("swapper_test").getDataSources().get("foo_db");
    // Swap and verify both halves of the resulting configuration.
    DataSourceConfiguration actual = new YamlProxyDataSourceConfigurationSwapper().swap(dataSourceConfig);
    assertConnectionConfig(actual);
    assertPoolConfig(actual);
}
/**
 * Runs the search request against all provider tasks — optionally in parallel on the
 * common ForkJoinPool — and returns the results accumulated by the session.
 *
 * @param request the search request (also decides sequential vs. parallel execution)
 * @param typeFilter restricts the session to results of this type
 * @return all results collected by the provider tasks
 */
@Override
public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) {
    // A session accumulates results from every provider for this single type filter.
    SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter));
    if (request.inParallel()) {
        // Submit every provider task to the common pool, then block until all complete.
        ForkJoinPool pool = ForkJoinPool.commonPool();
        getProviderTasks(request, session).stream().map(pool::submit).forEach(ForkJoinTask::join);
    } else {
        // Run the tasks one after another on the calling thread.
        getProviderTasks(request, session).forEach(Runnable::run);
    }
    return session.getResults();
}
@Test
public void testNodeId() {
    // Build a tiny graph and search for its first node by id.
    GraphGenerator generator = GraphGenerator.build().generateTinyGraph();
    SearchRequest request = buildRequest(GraphGenerator.FIRST_NODE, generator);
    Collection<SearchResult<Node>> results = controller.search(request, Node.class);
    // Exactly one match, and it is the node we asked for.
    Assert.assertEquals(1, results.size());
    SearchResult<Node> match = results.iterator().next();
    Assert.assertEquals(GraphGenerator.FIRST_NODE, match.getResult().getId());
}
/**
 * Merges per-subcluster {@code GetApplicationsResponse}s into one federated view.
 * <p>
 * Managed AM reports absorb the metrics of their unmanaged (UAM) counterparts via
 * {@code mergeAMWithUAM}; UAMs whose AM never appears are summed together and are only
 * surfaced in the result when {@code mergeUamToReport(...)} allows it.
 *
 * @param responses the responses gathered from the individual subclusters
 * @param returnPartialResult whether standalone UAM reports may appear in the result
 * @return a single response containing one merged report per application
 */
public static GetApplicationsResponse mergeApplications(Collection<GetApplicationsResponse> responses, boolean returnPartialResult){
    Map<ApplicationId, ApplicationReport> federationAM = new HashMap<>();
    Map<ApplicationId, ApplicationReport> federationUAMSum = new HashMap<>();
    for (GetApplicationsResponse appResponse : responses){
        for (ApplicationReport appReport : appResponse.getApplicationList()){
            ApplicationId appId = appReport.getApplicationId();
            // Check if this ApplicationReport is an AM
            if (!appReport.isUnmanagedApp()) {
                // Insert in the list of AM
                federationAM.put(appId, appReport);
                // Check if there are any UAM found before
                if (federationUAMSum.containsKey(appId)) {
                    // Merge the current AM with the found UAM
                    mergeAMWithUAM(appReport, federationUAMSum.get(appId));
                    // Remove the sum of the UAMs
                    federationUAMSum.remove(appId);
                }
                // This ApplicationReport is an UAM
            } else if (federationAM.containsKey(appId)) {
                // Merge the current UAM with its own AM
                mergeAMWithUAM(federationAM.get(appId), appReport);
            } else if (federationUAMSum.containsKey(appId)) {
                // Merge the current UAM with its own UAM and update the list of UAM
                ApplicationReport mergedUAMReport = mergeUAMWithUAM(federationUAMSum.get(appId), appReport);
                federationUAMSum.put(appId, mergedUAMReport);
            } else {
                // Insert in the list of UAM
                federationUAMSum.put(appId, appReport);
            }
        }
    }
    // Check the remaining UAMs are depending or not from federation
    for (ApplicationReport appReport : federationUAMSum.values()) {
        if (mergeUamToReport(appReport.getName(), returnPartialResult)) {
            federationAM.put(appReport.getApplicationId(), appReport);
        }
    }
    return GetApplicationsResponse.newInstance(federationAM.values());
}
@Test public void testMergeApplications() { ArrayList<GetApplicationsResponse> responses = new ArrayList<>(); responses.add(getApplicationsResponse(1, false)); responses.add(getApplicationsResponse(2, false)); GetApplicationsResponse result = RouterYarnClientUtils. mergeApplications(responses, false); Assert.assertNotNull(result); Assert.assertEquals(2, result.getApplicationList().size()); String appName1 = result.getApplicationList().get(0).getName(); String appName2 = result.getApplicationList().get(1).getName(); // Check that no Unmanaged applications are added to the result Assert.assertEquals(false, appName1.contains(UnmanagedApplicationManager.APP_NAME)); Assert.assertEquals(false, appName2.contains(UnmanagedApplicationManager.APP_NAME)); }
/**
 * Renders the customer products list page, optionally filtered.
 * The filter string is echoed back into the model so the view can redisplay it.
 */
@GetMapping("list")
public Mono<String> getProductsListPage(Model model, @RequestParam(name = "filter", required = false) String filter) {
    model.addAttribute("filter", filter);
    // Fetch all (optionally filtered) products, publish them into the model,
    // then resolve to the template name once the data is available.
    Mono<String> viewName = this.productsClient.findAllProducts(filter)
            .collectList()
            .doOnNext(foundProducts -> model.addAttribute("products", foundProducts))
            .thenReturn("customer/products/list");
    return viewName;
}
// Verifies the controller resolves the list template, copies the filter into the model,
// and publishes the products returned by the (mocked) products client.
@Test
void getProductsListPage_ReturnsProductsListPage() {
    // given
    var model = new ConcurrentModel();
    // Stub the client to return three products for the exact filter string used below.
    doReturn(Flux.fromIterable(List.of(
        new Product(1, "Отфильтрованный товар №1", "Описание отфильтрованного товара №1"),
        new Product(2, "Отфильтрованный товар №2", "Описание отфильтрованного товара №2"),
        new Product(3, "Отфильтрованный товар №3", "Описание отфильтрованного товара №3")
    ))).when(this.productsClient).findAllProducts("фильтр");
    // when
    StepVerifier.create(this.controller.getProductsListPage(model, "фильтр"))
        // then
        .expectNext("customer/products/list")
        .verifyComplete();
    // The filter and the fetched products must both have been exposed to the view.
    assertEquals("фильтр", model.getAttribute("filter"));
    assertEquals(List.of(
        new Product(1, "Отфильтрованный товар №1", "Описание отфильтрованного товара №1"),
        new Product(2, "Отфильтрованный товар №2", "Описание отфильтрованного товара №2"),
        new Product(3, "Отфильтрованный товар №3", "Описание отфильтрованного товара №3")), model.getAttribute("products"));
    // Exactly one client interaction, and the favourites client is never touched.
    verify(this.productsClient).findAllProducts("фильтр");
    verifyNoMoreInteractions(this.productsClient);
    verifyNoInteractions(this.favouriteProductsClient);
}
/**
 * REST endpoint that resolves a single timeline entity from its encoded UID.
 * Decodes the UID into a reader context, fetches the entity with the requested
 * data-to-retrieve options, checks the caller's access, and records latency metrics.
 *
 * @throws BadRequestException when the UID cannot be decoded
 * @throws NotFoundException when no entity exists for the UID
 */
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
    @Context HttpServletRequest req,
    @Context HttpServletResponse res,
    @PathParam("uid") String uId,
    @QueryParam("confstoretrieve") String confsToRetrieve,
    @QueryParam("metricstoretrieve") String metricsToRetrieve,
    @QueryParam("fields") String fields,
    @QueryParam("metricslimit") String metricsLimit,
    @QueryParam("metricstimestart") String metricsTimeStart,
    @QueryParam("metricstimeend") String metricsTimeEnd) {
  String url = req.getRequestURI() + (req.getQueryString() == null ? "" : QUERY_STRING_SEP + req.getQueryString());
  UserGroupInformation callerUGI = TimelineReaderWebServicesUtils.getUser(req);
  LOG.info("Received URL {} from user {}", url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
  long startTime = Time.monotonicNow();
  boolean succeeded = false;
  init(res);
  TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
  TimelineEntity entity = null;
  try {
    TimelineReaderContext context = TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
    if (context == null) {
      throw new BadRequestException("Incorrect UID " + uId);
    }
    entity = timelineReaderManager.getEntity(context,
        TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
            confsToRetrieve, metricsToRetrieve, fields, metricsLimit, metricsTimeStart, metricsTimeEnd));
    checkAccessForGenericEntity(entity, callerUGI);
    succeeded = true;
  } catch (Exception e) {
    handleException(e, url, startTime, "Either metricslimit or metricstime" + " start/end");
  } finally {
    long latency = Time.monotonicNow() - startTime;
    // NOTE(review): this single-entity endpoint records under the getEntities latency
    // metric — confirm that is intentional.
    METRICS.addGetEntitiesLatency(latency, succeeded);
    LOG.info("Processed URL {} (Took {} ms.)", url, latency);
  }
  if (entity == null) {
    LOG.info("Processed URL {} but entity not found" + " (Took {} ms.)", url, (Time.monotonicNow() - startTime));
    // Fixed: a space was missing before "is not found", producing e.g. "uid: Xis not found".
    throw new NotFoundException("Timeline entity with uid: " + uId + " is not found");
  }
  return entity;
}
// Smoke-tests the /timeline/health endpoint: expects HTTP 200 and a RUNNING health status.
@Test
void testHealthCheck() throws Exception {
    Client client = createClient();
    try {
        URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" + "timeline/health");
        ClientResponse resp = getResponse(client, uri);
        TimelineHealth timelineHealth = resp.getEntity(new GenericType<TimelineHealth>() {
        });
        assertEquals(200, resp.getStatus());
        assertEquals(TimelineHealth.TimelineHealthStatus.RUNNING, timelineHealth.getHealthStatus());
    } finally {
        // Release the Jersey client even when an assertion above fails.
        client.destroy();
    }
}
/**
 * Returns {@code true} when the whole input is one quoted token: an opening
 * {@code QUOTE_CHAR}, at least one non-quote character, and a closing {@code QUOTE_CHAR}.
 * Null and blank inputs are rejected.
 *
 * @param input the candidate string; may be null
 * @return whether the input is exactly one quoted token
 */
protected static boolean isSingleQuoted(String input) {
    if (input == null || input.isBlank()) {
        return false;
    }
    // String.matches anchors the pattern at both ends, so the previous explicit '^'
    // and the '{1}' quantifiers were redundant; behavior is unchanged.
    return input.matches(QUOTE_CHAR + "[^" + QUOTE_CHAR + "]+" + QUOTE_CHAR);
}
/**
 * Exercises isSingleQuoted on both accepted and rejected inputs; the original test
 * only covered the positive case.
 */
@Test
public void testSingleQuoted() {
    // A fully quoted path is recognized.
    assertTrue(isSingleQuoted("\"c:\\program files\\test\""));
    // Null, blank and unquoted inputs are rejected.
    assertFalse(isSingleQuoted(null));
    assertFalse(isSingleQuoted("   "));
    assertFalse(isSingleQuoted("c:\\program files\\test"));
    // An embedded quote breaks the single-quoted form.
    assertFalse(isSingleQuoted("\"abc\"def\""));
}
/**
 * Computes the OIDC {@code c_hash} for an authorization code by hashing the code's
 * octets with the hash function implied by the ID token's signing algorithm.
 *
 * @param signingAlg JWS algorithm of the ID token (selects the SHA variant)
 * @param code the authorization code to hash
 * @return the resulting code hash
 */
public static Base64URL getCodeHash(JWSAlgorithm signingAlg, String code) {
    // Use an explicit charset: the no-arg getBytes() depends on the JVM's platform
    // default and could produce different hashes on differently-configured hosts.
    return getHash(signingAlg, code.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
// c_hash for an ES512-signed token: hash of the code, left-most half, base64url-encoded.
// The expected value was generated independently of the implementation under test.
@Test
public void getCodeHash512() {
    String testCode = "b0x0rZ";
    Base64URL expectedHash = new Base64URL("R5DCRi5eOjlvyTAJfry2dNM9adJ2ElpDEKYYByYU920"); // independently generated
    Base64URL resultHash = IdTokenHashUtils.getCodeHash(JWSAlgorithm.ES512, testCode);
    assertEquals(expectedHash, resultHash);
}
/**
 * Creates an admin executor for PostgreSQL-specific statements.
 * Only {@code SHOW} statements are handled here; anything else falls through
 * to the default execution pipeline by returning an empty optional.
 */
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
    SQLStatement statement = sqlStatementContext.getSqlStatement();
    if (!(statement instanceof ShowStatement)) {
        return Optional.empty();
    }
    return Optional.of(new PostgreSQLShowVariableExecutor((ShowStatement) statement));
}
// A SELECT over pg_catalog containing a subquery is not an admin statement (only SHOW
// is handled), so the creator must return an empty optional.
// NOTE(review): this exercises the four-argument create(context, sql, databaseName,
// parameters) overload, not the single-argument one.
@Test
void assertCreateWithSelectPgCatalogWithSubquery() {
    SQLStatement sqlStatement = parseSQL(SELECT_PG_CATALOG_WITH_SUBQUERY);
    SelectStatementContext selectStatementContext = mock(SelectStatementContext.class);
    when(selectStatementContext.getSqlStatement()).thenReturn((SelectStatement) sqlStatement);
    Optional<DatabaseAdminExecutor> actual = new PostgreSQLAdminExecutorCreator().create(selectStatementContext, SELECT_PG_CATALOG_WITH_SUBQUERY, "", Collections.emptyList());
    assertFalse(actual.isPresent());
}
/**
 * Sends one or more SubmitMulti SMPP PDUs built from the exchange, collects the
 * per-PDU results, and publishes message ids, sent count and any per-destination
 * delivery errors as headers on the result message.
 *
 * @param exchange the Camel exchange carrying the outgoing message(s)
 * @throws SmppException when the SMPP session rejects a submission
 */
@Override
public void execute(Exchange exchange) throws SmppException {
    SubmitMulti[] submitMulties = createSubmitMulti(exchange);
    List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);
    for (SubmitMulti submitMulti : submitMulties) {
        SubmitMultiResult result;
        if (log.isDebugEnabled()) {
            log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
        }
        try {
            // Argument order mirrors the jSMPP submitMultiple signature exactly.
            result = session.submitMultiple(
                submitMulti.getServiceType(),
                TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
                NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
                submitMulti.getSourceAddr(),
                (Address[]) submitMulti.getDestAddresses(),
                new ESMClass(submitMulti.getEsmClass()),
                submitMulti.getProtocolId(),
                submitMulti.getPriorityFlag(),
                submitMulti.getScheduleDeliveryTime(),
                submitMulti.getValidityPeriod(),
                new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
                new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
                DataCodings.newInstance(submitMulti.getDataCoding()),
                submitMulti.getSmDefaultMsgId(),
                submitMulti.getShortMessage(),
                submitMulti.getOptionalParameters());
            results.add(result);
        } catch (Exception e) {
            throw new SmppException(e);
        }
    }
    if (log.isDebugEnabled()) {
        log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'", exchange.getExchangeId(), results);
    }
    List<String> messageIDs = new ArrayList<>(results.size());
    // {messageID : [{destAddr : address, error : errorCode}]}
    Map<String, List<Map<String, Object>>> errors = new HashMap<>();
    for (SubmitMultiResult result : results) {
        UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();
        if (deliveries != null) {
            // Collect one {destAddr, error} entry per failed destination of this PDU.
            List<Map<String, Object>> undelivered = new ArrayList<>();
            for (UnsuccessDelivery delivery : deliveries) {
                Map<String, Object> error = new HashMap<>();
                error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
                error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
                undelivered.add(error);
            }
            if (!undelivered.isEmpty()) {
                errors.put(result.getMessageId(), undelivered);
            }
        }
        messageIDs.add(result.getMessageId());
    }
    // Publish the outcome on the exchange's result message.
    Message message = ExchangeHelper.getResultMessage(exchange);
    message.setHeader(SmppConstants.ID, messageIDs);
    message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
    if (!errors.isEmpty()) {
        message.setHeader(SmppConstants.ERROR, errors);
    }
}
// Verifies that with the SMSC-default data coding (0x00), bytes outside the target
// charset are narrowed to '?' before submission, while in-range bytes pass through.
@Test
public void bodyWithSmscDefaultDataCodingNarrowedToCharset() throws Exception {
    final byte dataCoding = (byte) 0x00; /* SMSC-default */
    // 0xFF bytes are unmappable and must become '?'; printable ASCII and 0x00/0x7F survive.
    byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF };
    byte[] bodyNarrowed = { '?', 'A', 'B', '\0', '?', (byte) 0x7F, 'C', '?' };
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
    exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding);
    exchange.getIn().setBody(body);
    Address[] destAddrs = new Address[] {
        new Address(
            TypeOfNumber.UNKNOWN,
            NumberingPlanIndicator.UNKNOWN,
            "1717") };
    // The stub only matches when the narrowed body is submitted, so a wrong narrowing
    // would surface as a missing stubbed result rather than a passing call.
    when(session.submitMultiple(eq("CMT"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
        eq(destAddrs), eq(new ESMClass()), eq((byte) 0), eq((byte) 1), (String) isNull(), (String) isNull(),
        eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq(ReplaceIfPresentFlag.DEFAULT),
        eq(DataCodings.newInstance(dataCoding)), eq((byte) 0),
        eq(bodyNarrowed)))
        .thenReturn(new SubmitMultiResult("1", null, null));
    command.execute(exchange);
    assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
}
/**
 * Fails unless the map under test contains the given key/value entry, producing a
 * failure message tailored to how close the map came to containing it: same key with a
 * different value, keys/values whose toString matches the expected ones, or the value
 * present under different keys.
 */
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
    Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
    checkNotNull(actual);
    if (!actual.entrySet().contains(entry)) {
        List<@Nullable Object> keyList = singletonList(key);
        List<@Nullable Object> valueList = singletonList(value);
        if (actual.containsKey(key)) {
            // Key is present but mapped to a different value: report a value mismatch.
            Object actualValue = actual.get(key);
            /*
             * In the case of a null expected or actual value, clarify that the key *is* present and
             * *is* expected to be present. That is, get() isn't returning null to indicate that the key
             * is missing, and the user isn't making an assertion that the key is missing.
             */
            StandardSubjectBuilder check = check("get(%s)", key);
            if (value == null || actualValue == null) {
                check = check.withMessage("key is present but with a different value");
            }
            // See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
            check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
        } else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
            // A key with the same string form (but different type/identity) exists.
            failWithoutActual(
                fact("expected to contain entry", entry),
                fact("an instance of", objectToTypeName(entry)),
                simpleFact("but did not"),
                fact(
                    "though it did contain keys",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (actual.containsValue(value)) {
            // The value exists, just under other keys: list those keys.
            Set<@Nullable Object> keys = new LinkedHashSet<>();
            for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
                if (Objects.equal(actualEntry.getValue(), value)) {
                    keys.add(actualEntry.getKey());
                }
            }
            failWithoutActual(
                fact("expected to contain entry", entry),
                simpleFact("but did not"),
                fact("though it did contain keys with that value", keys),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (hasMatchingToStringPair(actual.values(), valueList)) {
            // A value with the same string form (but different type/identity) exists.
            failWithoutActual(
                fact("expected to contain entry", entry),
                fact("an instance of", objectToTypeName(entry)),
                simpleFact("but did not"),
                fact(
                    "though it did contain values",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else {
            // Nothing even close: plain missing-entry failure.
            failWithActual("expected to contain entry", entry);
        }
    }
}
@Test
public void containsKeyWithNullValueNullExpected() {
    // An explicit null value still counts as a present entry for the key.
    Map<String, String> map = Maps.newHashMap();
    map.put("a", null);
    assertThat(map).containsEntry("a", null);
}
/**
 * Computes every source-&gt;target herder pair required by the configuration.
 * <p>
 * A pair is included when replication is enabled for it OR heartbeats are enabled
 * (globally or per pair): a heartbeat-only reverse pair is still needed so the
 * MirrorHeartbeatConnector can emit into the source cluster.
 *
 * @return all source/target combinations that need a herder
 */
public List<SourceAndTarget> clusterPairs() {
    List<SourceAndTarget> pairs = new ArrayList<>();
    Set<String> clusters = clusters();
    Map<String, String> originalStrings = originalsStrings();
    boolean globalHeartbeatsEnabled = MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED_DEFAULT;
    if (originalStrings.containsKey(MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED)) {
        globalHeartbeatsEnabled = Boolean.parseBoolean(originalStrings.get(MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED));
    }
    for (String source : clusters) {
        for (String target : clusters) {
            if (!source.equals(target)) {
                String clusterPairConfigPrefix = source + "->" + target + ".";
                boolean clusterPairEnabled = Boolean.parseBoolean(originalStrings.get(clusterPairConfigPrefix + "enabled"));
                // Per-pair heartbeat setting overrides the global default when present.
                boolean clusterPairHeartbeatsEnabled = globalHeartbeatsEnabled;
                if (originalStrings.containsKey(clusterPairConfigPrefix + MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED)) {
                    clusterPairHeartbeatsEnabled = Boolean.parseBoolean(originalStrings.get(clusterPairConfigPrefix + MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED));
                }
                // By default, all source->target Herder combinations are created even if `x->y.enabled=false`
                // Unless `emit.heartbeats.enabled=false` or `x->y.emit.heartbeats.enabled=false`
                // Reason for this behavior: for a given replication flow A->B with heartbeats, 2 herders are required :
                // B->A for the MirrorHeartbeatConnector (emits heartbeats into A for monitoring replication health)
                // A->B for the MirrorSourceConnector (actual replication flow)
                if (clusterPairEnabled || clusterPairHeartbeatsEnabled) {
                    pairs.add(new SourceAndTarget(source, target));
                }
            }
        }
    }
    return pairs;
}
@Test
public void testClusterPairsWithTwoDisabledHeartbeats() {
    // With clusters {a, b, c} there are six ordered pairs; disabling heartbeats on
    // a->b and a->c removes those two herders, leaving four.
    MirrorMakerConfig config = new MirrorMakerConfig(makeProps(
            "clusters", "a, b, c",
            "a->b.emit.heartbeats.enabled", "false",
            "a->c.emit.heartbeats.enabled", "false"));
    List<SourceAndTarget> pairs = config.clusterPairs();
    assertEquals(4, pairs.size(),
            "clusterPairs count should match all combinations count except x->y.emit.heartbeats.enabled=false");
}
/**
 * Starts the underlying HTTP REST server if it is not already running; safe to call
 * repeatedly — subsequent calls are no-ops once {@code started} is set.
 * <p>
 * NOTE(review): the unsynchronized first read of {@code started} is a fast path;
 * correctness relies on the synchronized re-check below. Confirm {@code started} is
 * declared volatile if other threads read it without holding this lock.
 */
@Override
public void start() {
    if (started) {
        return;
    }
    synchronized (this) {
        if (started) {
            return;
        }
        // Bind to the configured port
        try {
            httpServer.start();
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info("Start the http rest server at port {}", serverConfig.getPort());
            }
        } catch (SofaRpcRuntimeException e) {
            // Already the domain exception type: propagate as-is rather than re-wrapping.
            throw e;
        } catch (Exception e) {
            throw new SofaRpcRuntimeException(LogCodes.getLog(LogCodes.ERROR_START_SERVER_WITH_PORT, "rest", serverConfig.getPort()), e);
        }
        started = true;
    }
}
// Exercises the REST server lifecycle: start, idempotent restart, stop, idempotent stop,
// and destroy with null / no-op destroy hooks — verifying the port is reachable exactly
// while the server is running.
@Test
public void start() {
    String host = "127.0.0.1";
    int port = 18801;
    ServerConfig serverConfig = new ServerConfig();
    serverConfig.setBoundHost(host);
    serverConfig.setPort(port);
    serverConfig.setProtocol(RpcConstants.PROTOCOL_TYPE_REST);
    RestServer server = new RestServer();
    server.init(serverConfig);
    server.start();
    Assert.assertTrue(server.started);
    Assert.assertTrue(NetUtils.canTelnet(host, port, 1000));
    // Starting again must be a no-op
    server.start();
    server.stop();
    Assert.assertFalse(server.started);
    Assert.assertFalse(NetUtils.canTelnet(host, port, 1000));
    // Stopping again must be a no-op
    server.stop();
    // Destroy with no hook
    server.init(serverConfig);
    server.start();
    Assert.assertTrue(server.started);
    Assert.assertTrue(NetUtils.canTelnet(host, port, 1000));
    server.destroy(null);
    // Destroy with an explicit (no-op) hook
    server.init(serverConfig);
    server.start();
    Assert.assertTrue(server.started);
    Assert.assertTrue(NetUtils.canTelnet(host, port, 1000));
    server.destroy(new Destroyable.DestroyHook() {
        @Override
        public void preDestroy() {
        }
        @Override
        public void postDestroy() {
        }
    });
}
/**
 * Computes the cluster events implied by the state transition described in
 * {@code params}: per-node diffs, a whole-cluster diff, and diffs for the derived
 * per-bucket-space states.
 *
 * @param params before/after state pair to diff
 * @return the events emitted by all three diff passes, in emission order
 */
public static List<Event> computeEventDiff(final Params params) {
    final List<Event> collected = new ArrayList<>();
    emitPerNodeDiffEvents(createBaselineParams(params), collected);
    emitWholeClusterDiffEvent(createBaselineParams(params), collected);
    emitDerivedBucketSpaceStatesDiffEvents(params, collected);
    return collected;
}
// Clearing a previously-set feed block (blocked -> null) while the cluster state itself
// is unchanged must emit exactly one cluster-level "no longer blocked" event.
@Test
void feed_block_disengage_edge_emits_cluster_event() {
    final EventFixture fixture = EventFixture.createForNodes(3)
        .clusterStateBefore("distributor:3 storage:3")
        .feedBlockBefore(ClusterStateBundle.FeedBlock.blockedWithDescription("we're closed"))
        .clusterStateAfter("distributor:3 storage:3")
        .feedBlockAfter(null);
    final List<Event> events = fixture.computeEventDiff();
    assertThat(events.size(), equalTo(1));
    assertThat(events, hasItem(clusterEventWithDescription("Cluster feed no longer blocked")));
}
/**
 * Registers (or refreshes) a router interface for every fixed IP of the given port by
 * synthesizing a Neutron router-interface JSON payload and feeding it to the admin service.
 *
 * @param osPort the OpenStack port backing the router interface
 * @param adminService admin service used to add or update the interface
 */
public static void addRouterIface(Port osPort, OpenstackRouterAdminService adminService) {
    osPort.getFixedIps().forEach(p -> {
        JsonNode jsonTree = new ObjectMapper().createObjectNode()
                .put("id", osPort.getDeviceId())
                .put("tenant_id", osPort.getTenantId())
                .put("subnet_id", p.getSubnetId())
                .put("port_id", osPort.getId());
        try {
            RouterInterface rIface = getContext(NeutronRouterInterface.class)
                    .readerFor(NeutronRouterInterface.class)
                    .readValue(jsonTree);
            // Update when the interface is already known, otherwise create it.
            if (adminService.routerInterface(rIface.getPortId()) != null) {
                adminService.updateRouterInterface(rIface);
            } else {
                adminService.addRouterInterface(rIface);
            }
        } catch (IOException e) {
            // Pass the exception as the last argument with no "{}" placeholder so SLF4J
            // logs the full stack trace; the previous "{}" consumed the Throwable as a
            // formatting argument and dropped the trace.
            log.error("IOException occurred while parsing router interface", e);
        }
    });
}
// Adds a router interface for two different ports in sequence and verifies each call
// results in a stored interface matching the corresponding port's device/subnet/tenant.
@Test
public void testAddRouterIface() {
    OpenstackRouterAdminService service = new TestOpenstackRouterAdminService();
    // First port: interface must be created from its device id, subnet and tenant.
    addRouterIface(openstackPort, service);
    RouterInterface initialRouterInterface = new TestRouterInterface(openstackPort.getDeviceId(),
        openstackPort.getFixedIps().stream().findAny().get().getSubnetId(),
        openstackPort.getId(),
        openstackPort.getTenantId());
    assertTrue(routerInterfacesEquals(initialRouterInterface, service.routerInterface(openstackPort.getId())));
    // Second (SR-IOV) port: its own interface entry must be stored as well.
    addRouterIface(openstackSriovPort1, service);
    RouterInterface updatedInitialRouterInterface = new TestRouterInterface(openstackSriovPort1.getDeviceId(),
        openstackSriovPort1.getFixedIps().stream().findAny().get().getSubnetId(),
        openstackSriovPort1.getId(),
        openstackSriovPort1.getTenantId());
    assertTrue(routerInterfacesEquals(
        updatedInitialRouterInterface, service.routerInterface(openstackSriovPort1.getId())));
}
protected boolean needFiltering(Exchange exchange) { // exchange property takes precedence over data format property return exchange == null ? filterNonXmlChars : exchange.getProperty(Exchange.FILTER_NON_XML_CHARS, filterNonXmlChars, Boolean.class); }
@Test
public void testNeedFilteringFalsePropagates() {
    // An explicit FALSE on the exchange must win over the data format's default.
    Exchange exchange = new DefaultExchange(camelContext);
    exchange.setProperty(Exchange.FILTER_NON_XML_CHARS, Boolean.FALSE);
    boolean filtering = jaxbDataFormat.needFiltering(exchange);
    assertFalse(filtering);
}
/**
 * Cancels the job: records a generic cancellation cause and hands the current
 * execution-graph state over to the Canceling state.
 */
@Override
public void cancel() {
    // Recorded so later failure reporting can attribute the termination to cancellation.
    operationFailureCause = new FlinkException("The job was cancelled.");
    context.goToCanceling(
        getExecutionGraph(),
        getExecutionGraphHandler(),
        getOperatorCoordinatorHandler(),
        getFailures());
}
// Cancelling while in stop-with-savepoint must transition the scheduler to Cancelling.
@Test
void testTransitionToCancellingOnCancel() throws Exception {
    try (MockStopWithSavepointContext ctx = new MockStopWithSavepointContext()) {
        StopWithSavepoint sws = createStopWithSavepoint(ctx);
        ctx.setStopWithSavepoint(sws);
        // The context asserts goToCanceling(...) is invoked with a non-null execution graph.
        ctx.setExpectCancelling(assertNonNull());
        sws.cancel();
    }
}