focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Reports whether the wrapped store is persistent by delegating the query to it.
 *
 * @return {@code true} if the underlying store is persistent
 */
@Override
public boolean persistent() {
    return store.persistent();
}
@Test public void shouldReturnPersistentForVersionedStore() { givenWrapperWithVersionedStore(); // test "persistent = true" when(versionedStore.persistent()).thenReturn(true); assertThat(wrapper.persistent(), equalTo(true)); // test "persistent = false" when(versionedStore.persistent()).thenReturn(false); assertThat(wrapper.persistent(), equalTo(false)); }
@Override public OptionalLong apply(OptionalLong previousSendTimeNs) { long delayNs; if (previousGlobalFailures > 0) { // If there were global failures (like a response timeout), we want to wait for the // full backoff period. delayNs = backoff.backoff(previousGlobalFailures); } else if ((numReadyRequests > MAX_ASSIGNMENTS_PER_REQUEST) && !hasInflightRequests) { // If there were no previous failures, and we have lots of requests, send it as soon // as possible. delayNs = 0; } else { // Otherwise, use the standard delay period. This helps to promote batching, which // reduces load on the controller. delayNs = backoff.initialInterval(); } long newSendTimeNs = nowNs + delayNs; if (previousSendTimeNs.isPresent() && previousSendTimeNs.getAsLong() < newSendTimeNs) { // If the previous send time was before the new one we calculated, go with the // previous one. return previousSendTimeNs; } // Otherwise, return our new send time. return OptionalLong.of(newSendTimeNs); }
@Test
public void applyAfterDispatchIntervalWithExistingEarlierDeadline() {
    // An existing deadline at half the initial interval is earlier than any
    // newly computed one, so it must be returned unchanged.
    OptionalLong earlierDeadline = OptionalLong.of(BACKOFF.initialInterval() / 2);
    AssignmentsManagerDeadlineFunction function =
            new AssignmentsManagerDeadlineFunction(BACKOFF, 0, 0, false, 12);
    assertEquals(earlierDeadline, function.apply(earlierDeadline));
}
/**
 * Selects the correct AD redirect flow for an authentication request.
 *
 * <p>Invalid sessions are cancelled; requests from the BvD entity get extra
 * preparation and a BvD-specific return URL; everything else follows the
 * default artifact redirect flow.
 *
 * @throws UnsupportedEncodingException if URL composition fails
 * @throws SamlSessionException on SAML session errors
 */
public String redirectWithCorrectAttributesForAd(HttpServletRequest httpRequest, AuthenticationRequest authenticationRequest) throws UnsupportedEncodingException, SamlSessionException {
    final SamlSession samlSession = authenticationRequest.getSamlSession();

    final String validationStatus = samlSession.getValidationStatus();
    if (validationStatus != null && validationStatus.equals(STATUS_INVALID.label)) {
        // Invalid session short-circuits into the cancel flow.
        return cancelAuthenticationToAd(authenticationRequest, samlSession.getArtifact());
    }

    final String requesterId = samlSession.getRequesterId();
    if (requesterId != null && requesterId.equals(bvdEntityId)) {
        // BvD requester: dedicated preparation plus the BvD return URL.
        prepareAuthenticationToAdForBvd(samlSession);
        String bvdReturnUrl = generateReturnUrl(httpRequest, samlSession.getArtifact(), ENTRANCE_RETURN_URL_BVD);
        logger.info("Prepare authentication to Ad for Bvd");
        return prepareAuthenticationToAd(bvdReturnUrl, authenticationRequest);
    }

    // Default flow: redirect back carrying the artifact.
    String adReturnUrl = generateReturnUrl(httpRequest, samlSession.getArtifact(), REDIRECT_WITH_ARTIFACT_URL);
    return prepareAuthenticationToAd(adReturnUrl, authenticationRequest);
}
@Test
public void redirectWithCorrectAttributesForAdTest() throws UnsupportedEncodingException, SamlSessionException {
    // Given: a valid session from a non-BvD requester.
    AuthenticationRequest authenticationRequest = new AuthenticationRequest();
    authenticationRequest.setRequest(httpServletRequestMock);
    SamlSession samlSession = new SamlSession(1L);
    samlSession.setRequesterId("DvEntity");
    samlSession.setArtifact("artifact");
    authenticationRequest.setSamlSession(samlSession);

    // When
    String result = authenticationEntranceService.redirectWithCorrectAttributesForAd(httpServletRequestMock, authenticationRequest);

    // Then: the default AD flow is taken and no transaction id is assigned.
    assertNotNull(result);
    assertNull(samlSession.getTransactionId());
    assertEquals(result, frontChannel);
}
/**
 * Reads the global configuration from the default configuration directory.
 *
 * @return the parsed global configuration
 * @throws IOException if the config file cannot be read
 * @throws InvalidGlobalConfigException if the config contents are invalid
 */
public static GlobalConfig readConfig() throws IOException, InvalidGlobalConfigException {
    return readConfig(getConfigDir());
}
@Test
public void testReadConfig_emptyFile() throws IOException {
    // An empty config file cannot be parsed; the error must point at the FAQ
    // and name the offending file path.
    temporaryFolder.newFile("config.json");

    IOException exception = assertThrows(IOException.class, () -> GlobalConfig.readConfig(configDir));

    assertThat(exception)
        .hasMessageThat()
        .startsWith(
            "Failed to create, open, or parse global Jib config file; see "
                + "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md#where-is-the-global-jib-configuration-file-and-how-i-can-configure-it "
                + "to fix or you may need to delete");
    assertThat(exception).hasMessageThat().endsWith(File.separator + "config.json");
}
/**
 * Builds a virtual {@code Dependency} for the given Maven project.
 *
 * <p>Preference order: {@code pom.xml} in the project base directory, then the
 * project's own file, otherwise a bare virtual dependency.
 */
Dependency newDependency(MavenProject prj) {
    final File pom = new File(prj.getBasedir(), "pom.xml");
    if (pom.isFile()) {
        getLog().debug("Adding virtual dependency from pom.xml");
        return new Dependency(pom, true);
    }
    if (prj.getFile().isFile()) {
        getLog().debug("Adding virtual dependency from file");
        return new Dependency(prj.getFile(), true);
    }
    return new Dependency(true);
}
@Test public void should_newDependency_get_pom_from_base_dir() { // Given BaseDependencyCheckMojo instance = new BaseDependencyCheckMojoImpl(); new MockUp<MavenProject>() { @Mock public File getBasedir() { return new File("src/test/resources/maven_project_base_dir"); } }; String expectOutput = "pom.xml"; // When String output = instance.newDependency(project).getFileName(); // Then assertEquals(expectOutput, output); }
/**
 * Creates a new user entry with its password and group memberships, then
 * persists the stores for the given realm.
 *
 * @param username unique user name; must not already exist
 * @param password plain-text password; stored as-is only for CLEAR encryption
 * @param realm security realm (normalized via checkRealm)
 * @param encryption how the password should be stored
 * @param userGroups groups for the user, or null for none
 * @param algorithms digest algorithms used when encrypting
 */
public void createUser(String username, String password, String realm, Encryption encryption, List<String> userGroups, List<String> algorithms) {
    if (users.containsKey(username)) {
        throw MSG.userToolUserExists(username);
    }
    realm = checkRealm(realm);
    // CLEAR keeps the raw password; anything else goes through encryption.
    final String storedPassword = Encryption.CLEAR.equals(encryption)
            ? password
            : encryptPassword(username, realm, password, algorithms);
    users.put(username, storedPassword);
    // Groups are persisted as a comma-separated list; no groups becomes "".
    groups.put(username, userGroups == null ? "" : String.join(",", userGroups));
    store(realm, encryption);
}
@Test
public void testUserToolEncrypted() throws Exception {
    UserTool userTool = new UserTool(serverDirectory.getAbsolutePath());
    userTool.createUser("user", "password", UserTool.DEFAULT_REALM_NAME, UserTool.Encryption.ENCRYPTED, Collections.singletonList("admin"), null);

    // users.properties carries exactly one (encrypted) password entry.
    Properties users = loadProperties("users.properties");
    assertEquals(1, users.size());
    assertPassword("password", users.getProperty("user"));

    // groups.properties maps the user to its single group.
    Properties groups = loadProperties("groups.properties");
    assertEquals(1, groups.size());
    assertEquals("admin", groups.getProperty("user"));
}
/**
 * Entry point for the Mapsforge AWT sample viewer.
 *
 * <p>If the first argument is a DEM folder, a hill-shading configuration is
 * built from it and that argument is consumed. Remaining arguments are map
 * files. A Swing frame is shown; the view state is saved to Java Preferences
 * on confirmed close and restored (or fitted to the map bounds) on open.
 */
public static void main(String[] args) {
    // Square frame buffer
    Parameters.SQUARE_FRAME_BUFFER = false;

    HillsRenderConfig hillsCfg = null;
    File demFolder = getDemFolder(args);
    if (demFolder != null) {
        MemoryCachingHgtReaderTileSource tileSource = new MemoryCachingHgtReaderTileSource(new DemFolderFS(demFolder), new ClearAsymmetryShadingAlgorithm(), AwtGraphicFactory.INSTANCE);
        tileSource.setEnableInterpolationOverlap(true);
        hillsCfg = new HillsRenderConfig(tileSource);
        hillsCfg.indexOnThread();
        // The DEM folder argument has been consumed; pass the rest along.
        args = Arrays.copyOfRange(args, 1, args.length);
    }

    List<File> mapFiles = SHOW_RASTER_MAP ? null : getMapFiles(args);
    final MapView mapView = createMapView();
    final BoundingBox boundingBox = addLayers(mapView, mapFiles, hillsCfg);

    final PreferencesFacade preferencesFacade = new JavaPreferences(Preferences.userNodeForPackage(Samples.class));

    final JFrame frame = new JFrame();
    frame.setTitle("Mapsforge Samples");
    frame.add(mapView);
    frame.pack();
    frame.setSize(1024, 768);
    frame.setLocationRelativeTo(null);
    frame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
    frame.addWindowListener(new WindowAdapter() {
        @Override
        public void windowClosing(WindowEvent e) {
            // Confirm before quitting; on "yes" persist the view model and release resources.
            int result = JOptionPane.showConfirmDialog(frame, MESSAGE, TITLE, JOptionPane.YES_NO_OPTION);
            if (result == JOptionPane.YES_OPTION) {
                mapView.getModel().save(preferencesFacade);
                mapView.destroyAll();
                AwtGraphicFactory.clearResourceMemoryCache();
                frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
            }
        }

        @Override
        public void windowOpened(WindowEvent e) {
            final Model model = mapView.getModel();
            model.init(preferencesFacade);
            // No usable saved position: fit the view to the map bounds instead.
            if (model.mapViewPosition.getZoomLevel() == 0 || !boundingBox.contains(model.mapViewPosition.getCenter())) {
                byte zoomLevel = LatLongUtils.zoomForBounds(model.mapViewDimension.getDimension(), boundingBox, model.displayModel.getTileSize());
                model.mapViewPosition.setMapPosition(new MapPosition(boundingBox.getCenterPoint(), zoomLevel));
            }
        }
    });
    frame.setVisible(true);
}
@Test
public void mainTest() {
    // Launching without any arguments must be rejected as invalid.
    verifyInvalidArguments(new String[]{});
}
/**
 * Resolves a fragment path against the given JSON tree.
 *
 * <p>The path is split on the supplied delimiter characters and delegated to
 * the list-based overload.
 */
public JsonNode resolve(JsonNode tree, String path, String refFragmentPathDelimiters) {
    final List<String> segments = new ArrayList<>(asList(split(path, refFragmentPathDelimiters)));
    return resolve(tree, segments);
}
@Test
public void hashResolvesToRoot() {
    // "#" must resolve to the document root itself, not to any child node.
    ObjectNode root = new ObjectMapper().createObjectNode();
    root.set("child1", root.objectNode());
    root.set("child2", root.objectNode());
    root.set("child3", root.objectNode());

    assertThat((ObjectNode) resolver.resolve(root, "#", "#/."), is(sameInstance(root)));
}
/**
 * Two cells are equal when their dirty state, current counter value, and
 * metric name all match.
 */
@Override
public boolean equals(@Nullable Object object) {
    if (!(object instanceof CounterCell)) {
        return false;
    }
    CounterCell other = (CounterCell) object;
    // value is compared by its current contents (value.get()), not holder identity.
    return Objects.equals(dirty, other.dirty)
        && value.get() == other.value.get()
        && Objects.equals(name, other.name);
}
@Test
public void testEquals() {
    // Two cells built from the same metric name are equal and hash alike.
    CounterCell counterCell = new CounterCell(MetricName.named("namespace", "name"));
    CounterCell equal = new CounterCell(MetricName.named("namespace", "name"));

    Assert.assertEquals(counterCell, equal);
    Assert.assertEquals(counterCell.hashCode(), equal.hashCode());
}
/**
 * Builds a factory that instantiates the UDF for a given config, configures it
 * (inside the extension security manager scope) when it is {@code Configurable},
 * and optionally wraps it with a metrics-recording decorator.
 */
private Function<KsqlConfig, Kudf> getUdfFactory(
    final Method method,
    final UdfDescription udfDescriptionAnnotation,
    final String functionName,
    final FunctionInvoker invoker,
    final String sensorName
) {
    return ksqlConfig -> {
        final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
            method.getDeclaringClass(), udfDescriptionAnnotation.name());
        if (actualUdf instanceof Configurable) {
            // configure() runs between pushInUdf/popOutUdf so UDF code executes
            // under the restricted security-manager scope.
            ExtensionSecurityManager.INSTANCE.pushInUdf();
            try {
                ((Configurable) actualUdf)
                    .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
            } finally {
                ExtensionSecurityManager.INSTANCE.popOutUdf();
            }
        }
        final PluggableUdf pluggable = new PluggableUdf(invoker, actualUdf);
        // When metrics are enabled, wrap the UDF so invocations are recorded.
        return metrics
            .<Kudf>map(m -> new UdfMetricProducer(m.getSensor(sensorName), pluggable, Time.SYSTEM))
            .orElse(pluggable);
    };
}
@Test
public void shouldAllowClassesWithSameFQCNInDifferentUDFJars() throws Exception {
    // Two jars each ship a UDF class with the same fully-qualified name but
    // different behavior; loading both must keep them classloader-isolated.
    final File pluginDir = tempFolder.newFolder();
    Files.copy(Paths.get("src/test/resources/udf-example.jar"), new File(pluginDir, "udf-example.jar").toPath());
    Files.copy(Paths.get("src/test/resources/udf-isolated.jar"), new File(pluginDir, "udf-isolated.jar").toPath());
    final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
    final UserFunctionLoader udfLoader = new UserFunctionLoader(functionRegistry, pluginDir, PARENT_CLASS_LOADER, value -> false, Optional.empty(), true);
    udfLoader.load();
    final UdfFactory multiply = functionRegistry.getUdfFactory(FunctionName.of("multiply"));
    final UdfFactory multiply2 = functionRegistry.getUdfFactory(FunctionName.of("multiply2"));
    final Kudf multiplyUdf = multiply.getFunction(Arrays.asList(SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.INTEGER))).newInstance(ksqlConfig);
    final Kudf multiply2Udf = multiply2.getFunction(Arrays.asList(SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.INTEGER))).newInstance(ksqlConfig);
    // Same FQCN, different jar: 2*2 yields 4 in one implementation and 5 in the other.
    assertThat(multiplyUdf.evaluate(2, 2), equalTo(4L));
    assertThat(multiply2Udf.evaluate(2, 2), equalTo(5L));
}
/**
 * Returns the launcher for the given project, creating and caching one on
 * first use. A null or empty project name falls back to the default project.
 * The method is synchronized, so lookup and creation are atomic.
 */
public static synchronized TransformServiceLauncher forProject(
    @Nullable String projectName, int port, @Nullable String pythonRequirementsFile)
    throws IOException {
  final String key =
      (projectName == null || projectName.isEmpty()) ? DEFAULT_PROJECT_NAME : projectName;
  TransformServiceLauncher launcher = launchers.get(key);
  if (launcher == null) {
    launcher = new TransformServiceLauncher(key, port, pythonRequirementsFile);
    launchers.put(key, launcher);
  }
  return launcher;
}
@Test
public void testLauncherInstallsLocalDependencies() throws IOException {
    String projectName = UUID.randomUUID().toString();
    Path expectedTempDir = Paths.get(System.getProperty("java.io.tmpdir"), projectName);
    File file = expectedTempDir.toFile();
    file.deleteOnExit();

    // First local dependency: a throwaway file in the temp dir.
    String dependency1FileName = "dep_" + UUID.randomUUID().toString();
    File dependency1 = Paths.get(System.getProperty("java.io.tmpdir"), dependency1FileName).toFile();
    dependency1.deleteOnExit();
    try (Writer fout = new OutputStreamWriter(new FileOutputStream(dependency1.getAbsolutePath()), StandardCharsets.UTF_8)) {
        fout.write("tempdata\n");
    }

    // Second local dependency.
    String dependency2FileName = "dep_" + UUID.randomUUID().toString();
    File dependency2 = Paths.get(System.getProperty("java.io.tmpdir"), dependency2FileName).toFile();
    dependency2.deleteOnExit();
    try (Writer fout = new OutputStreamWriter(new FileOutputStream(dependency2.getAbsolutePath()), StandardCharsets.UTF_8)) {
        fout.write("tempdata\n");
    }

    // Requirements file referencing the two local files plus one PyPI package.
    File requirementsFile = Paths.get(System.getProperty("java.io.tmpdir"), ("requirements" + UUID.randomUUID().toString() + ".txt")).toFile();
    requirementsFile.deleteOnExit();
    try (Writer fout = new OutputStreamWriter(new FileOutputStream(requirementsFile.getAbsolutePath()), StandardCharsets.UTF_8)) {
        fout.write(dependency1.getAbsolutePath() + "\n");
        fout.write(dependency2.getAbsolutePath() + "\n");
        fout.write("pypipackage" + "\n");
    }

    TransformServiceLauncher.forProject(projectName, 12345, requirementsFile.getAbsolutePath());

    // Confirming that the Transform Service launcher created a temporary requirements file with the
    // specified set of packages.
    Path expectedUpdatedRequirementsFile = Paths.get(expectedTempDir.toString(), "dependencies_dir", "requirements.txt");
    Assert.assertTrue(expectedUpdatedRequirementsFile.toFile().exists());
    ArrayList<String> expectedUpdatedRequirementsFileLines = new ArrayList<>();
    try (BufferedReader bufReader = Files.newBufferedReader(expectedUpdatedRequirementsFile, UTF_8)) {
        String line = bufReader.readLine();
        while (line != null) {
            expectedUpdatedRequirementsFileLines.add(line);
            line = bufReader.readLine();
        }
    }

    // To make local packages available to the expansion service Docker containers, the temporary
    // requirements file should contain names of the local packages relative to the dependencies
    // volume and local packages should have been copied to the dependencies volume.
    Assert.assertEquals(3, expectedUpdatedRequirementsFileLines.size());
    Assert.assertTrue(expectedUpdatedRequirementsFileLines.contains(dependency1FileName));
    Assert.assertTrue(expectedUpdatedRequirementsFileLines.contains(dependency2FileName));
    Assert.assertTrue(expectedUpdatedRequirementsFileLines.contains("pypipackage"));
    Assert.assertTrue(Paths.get(expectedTempDir.toString(), "dependencies_dir", dependency1FileName).toFile().exists());
    Assert.assertTrue(Paths.get(expectedTempDir.toString(), "dependencies_dir", dependency2FileName).toFile().exists());
}
/**
 * FEEL matches(): tests whether {@code input} contains a match for
 * {@code pattern}, optionally modified by {@code flags}.
 *
 * <p>When flags are supplied, they are validated and prepended to the pattern
 * as an embedded flag expression {@code (?...)}; the {@code U} flag
 * (UNICODE_CHARACTER_CLASS) is always added if not already present.
 *
 * @throws InvalidParameterException if input or pattern is null
 */
static FEELFnResult<Boolean> matchFunctionWithFlags(String input, String pattern, String flags) {
    log.debug("Input: {} , Pattern: {}, Flags: {}", input, pattern, flags);
    if (input == null) {
        throw new InvalidParameterException("input");
    }
    if (pattern == null) {
        throw new InvalidParameterException("pattern");
    }

    String flagsString = "";
    if (flags != null && !flags.isEmpty()) {
        checkFlags(flags);
        // Force the Unicode character-class flag alongside whatever was requested.
        String effectiveFlags = flags.contains("U") ? flags : flags + "U";
        flagsString = String.format("(?%s)", effectiveFlags);
    }
    log.debug("flagsString: {}", flagsString);

    String stringToBeMatched = flagsString + pattern;
    log.debug("stringToBeMatched: {}", stringToBeMatched);

    Matcher matcher = Pattern.compile(stringToBeMatched).matcher(input);
    boolean matchFound = matcher.find();
    log.debug("matchFound: {}", matchFound);
    return FEELFnResult.ofResult(matchFound);
}
@Test
void invokeNull() {
    // A null input and/or pattern must be rejected before any matching happens.
    assertThrows(InvalidParameterException.class, () -> MatchesFunction.matchFunctionWithFlags(null, null, null));
    assertThrows(InvalidParameterException.class, () -> MatchesFunction.matchFunctionWithFlags(null, "test", null));
    assertThrows(InvalidParameterException.class, () -> MatchesFunction.matchFunctionWithFlags("test", null, null));
}
@Override public Repository getRepository() { //Repository may be null if executing remotely in Pentaho Server Repository repository = super.getRepository(); return repository != null ? repository : getTransMeta().getRepository(); }
@Test public void getRepositoryNullTest() { metaInject.getRepository(); //If repository is not set in the base step (Remote Executions/Scheduling) Need to get the repository from TransMeta verify( metaInject, times( 1 ) ).getTransMeta(); }
/**
 * Builds the checksum query for a table: a leading {@code count(*)} column
 * followed by the checksum expressions each column's validator contributes.
 *
 * @param tableName table to checksum
 * @param columns columns to include
 * @param partitionPredicate optional WHERE predicate restricting partitions
 */
public Query generateChecksumQuery(QualifiedName tableName, List<Column> columns, Optional<Expression> partitionPredicate) {
    ImmutableList.Builder<SelectItem> items = ImmutableList.builder();
    // The row count always comes first.
    items.add(new SingleColumn(new FunctionCall(QualifiedName.of("count"), ImmutableList.of())));
    for (Column column : columns) {
        items.addAll(columnValidators.get(column.getCategory()).get().generateChecksumColumns(column));
    }
    return simpleQuery(new Select(false, items.build()), new Table(tableName), partitionPredicate, Optional.empty());
}
@Test
public void testChecksumQuery() {
    // Generate the checksum query over one column of every supported category
    // (scalars, floating point, arrays, maps, rows) ...
    Query checksumQuery = checksumValidator.generateChecksumQuery(
        QualifiedName.of("test:di"),
        ImmutableList.of(BIGINT_COLUMN, VARCHAR_COLUMN, DOUBLE_COLUMN, REAL_COLUMN, DOUBLE_ARRAY_COLUMN, REAL_ARRAY_COLUMN, INT_ARRAY_COLUMN, ROW_ARRAY_COLUMN, MAP_ARRAY_COLUMN, MAP_COLUMN, MAP_FLOAT_NON_FLOAT_COLUMN, MAP_NON_ORDERABLE_COLUMN, ROW_COLUMN),
        Optional.empty());
    // ...and compare it, line by line and order-insensitively, with the expected SQL.
    Statement expectedChecksumQuery = sqlParser.createStatement(
        "SELECT\n"
            + "  \"count\"(*)\n"
            + ", \"checksum\"(\"bigint\") \"bigint$checksum\"\n"
            + ", \"checksum\"(\"varchar\") \"varchar$checksum\"\n"
            + ", \"sum\"(\"double\") FILTER (WHERE \"is_finite\"(\"double\")) \"double$sum\"\n"
            + ", \"count\"(\"double\") FILTER (WHERE \"is_nan\"(\"double\")) \"double$nan_count\"\n"
            + ", \"count\"(\"double\") FILTER (WHERE (\"double\" = \"infinity\"())) \"double$pos_inf_count\"\n"
            + ", \"count\"(\"double\") FILTER (WHERE (\"double\" = -\"infinity\"())) \"double$neg_inf_count\"\n"
            + ", \"sum\"(CAST(\"real\" AS double)) FILTER (WHERE \"is_finite\"(\"real\")) \"real$sum\"\n"
            + ", \"count\"(\"real\") FILTER (WHERE \"is_nan\"(\"real\")) \"real$nan_count\"\n"
            + ", \"count\"(\"real\") FILTER (WHERE (\"real\" = \"infinity\"())) \"real$pos_inf_count\"\n"
            + ", \"count\"(\"real\") FILTER (WHERE (\"real\" = -\"infinity\"())) \"real$neg_inf_count\"\n"
            + ", \"sum\"(\"array_sum\"(\"filter\"(\"double_array\", (x) -> \"is_finite\"(x)))) \"double_array$sum\"\n"
            + ", \"sum\"(\"cardinality\"(\"filter\"(\"double_array\", (x) -> \"is_nan\"(x)))) \"double_array$nan_count\"\n"
            + ", \"sum\"(\"cardinality\"(\"filter\"(\"double_array\", (x) -> (x = +\"infinity\"())))) \"double_array$pos_inf_count\"\n"
            + ", \"sum\"(\"cardinality\"(\"filter\"(\"double_array\", (x) -> (x = -\"infinity\"())))) \"double_array$neg_inf_count\"\n"
            + ", \"checksum\"(\"cardinality\"(\"double_array\")) \"double_array$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"double_array\")), 0) \"double_array$cardinality_sum\"\n"
            + ", \"sum\"(\"array_sum\"(\"filter\"(CAST(\"real_array\" AS array(double)), (x) -> \"is_finite\"(x)))) \"real_array$sum\"\n"
            + ", \"sum\"(\"cardinality\"(\"filter\"(CAST(\"real_array\" AS array(double)), (x) -> \"is_nan\"(x)))) \"real_array$nan_count\"\n"
            + ", \"sum\"(\"cardinality\"(\"filter\"(CAST(\"real_array\" AS array(double)), (x) -> (x = +\"infinity\"())))) \"real_array$pos_inf_count\"\n"
            + ", \"sum\"(\"cardinality\"(\"filter\"(CAST(\"real_array\" AS array(double)), (x) -> (x = -\"infinity\"())))) \"real_array$neg_inf_count\"\n"
            + ", \"checksum\"(\"cardinality\"(\"real_array\")) \"real_array$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"real_array\")), 0) \"real_array$cardinality_sum\"\n"
            + ", \"checksum\"(\"array_sort\"(\"int_array\")) \"int_array$checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"int_array\")) \"int_array$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"int_array\")), 0) \"int_array$cardinality_sum\"\n"
            + ", COALESCE(\"checksum\"(TRY(\"array_sort\"(\"row_array\"))), \"checksum\"(\"row_array\")) \"row_array$checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"row_array\")) \"row_array$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"row_array\")), 0) \"row_array$cardinality_sum\"\n"
            + ", \"checksum\"(\"map_array\") \"map_array$checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"map_array\")) \"map_array$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"map_array\")), 0) \"map_array$cardinality_sum\"\n"
            + ", \"checksum\"(\"map\") \"map$checksum\"\n"
            + ", \"checksum\"(\"array_sort\"(\"map_keys\"(\"map\"))) \"map$keys_checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"map\")) \"map$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"map\")), 0) \"map$cardinality_sum\"\n"
            + ", \"checksum\"(\"map_float_non_float\") \"map_float_non_float$checksum\"\n"
            + ", \"checksum\"(\"array_sort\"(\"map_keys\"(\"map_float_non_float\"))) \"map_float_non_float$keys_checksum\"\n"
            + ", \"checksum\"(\"array_sort\"(\"map_values\"(\"map_float_non_float\"))) \"map_float_non_float$values_checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"map_float_non_float\")) \"map_float_non_float$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"map_float_non_float\")), 0) \"map_float_non_float$cardinality_sum\"\n"
            + ", \"checksum\"(\"map_non_orderable\") \"map_non_orderable$checksum\"\n"
            + ", \"checksum\"(\"map_keys\"(\"map_non_orderable\")) \"map_non_orderable$keys_checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"map_non_orderable\")) \"map_non_orderable$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"map_non_orderable\")), 0) \"map_non_orderable$cardinality_sum\"\n"
            + ", \"checksum\"(\"row\".\"i\") \"row.i$checksum\"\n"
            + ", \"checksum\"(\"row\"[2]) \"row._col2$checksum\"\n"
            + ", \"sum\"(\"row\".\"d\") FILTER (WHERE \"is_finite\"(\"row\".\"d\")) \"row.d$sum\"\n"
            + ", \"count\"(\"row\".\"d\") FILTER (WHERE \"is_nan\"(\"row\".\"d\")) \"row.d$nan_count\"\n"
            + ", \"count\"(\"row\".\"d\") FILTER (WHERE (\"row\".\"d\" = \"infinity\"())) \"row.d$pos_inf_count\"\n"
            + ", \"count\"(\"row\".\"d\") FILTER (WHERE (\"row\".\"d\" = -\"infinity\"())) \"row.d$neg_inf_count\"\n"
            + ", \"checksum\"(\"array_sort\"(\"row\".\"a\")) \"row.a$checksum\"\n"
            + ", \"checksum\"(\"cardinality\"(\"row\".\"a\")) \"row.a$cardinality_checksum\"\n"
            + ", COALESCE(\"sum\"(\"cardinality\"(\"row\".\"a\")), 0) \"row.a$cardinality_sum\"\n"
            + ", \"sum\"(\"row\".\"r\"[1]) FILTER (WHERE \"is_finite\"(\"row\".\"r\"[1])) \"row.r._col1$sum\"\n"
            + ", \"count\"(\"row\".\"r\"[1]) FILTER (WHERE \"is_nan\"(\"row\".\"r\"[1])) \"row.r._col1$nan_count\"\n"
            + ", \"count\"(\"row\".\"r\"[1]) FILTER (WHERE (\"row\".\"r\"[1] = \"infinity\"())) \"row.r._col1$pos_inf_count\"\n"
            + ", \"count\"(\"row\".\"r\"[1]) FILTER (WHERE (\"row\".\"r\"[1] = -\"infinity\"())) \"row.r._col1$neg_inf_count\"\n"
            + ", \"checksum\"(\"row\".\"r\".\"b\") \"row.r.b$checksum\"\n"
            + "FROM\n"
            + "  test:di\n",
        PARSING_OPTIONS);
    // Order-insensitive comparison: format both statements, split into lines, sort.
    String[] arrayExpected = formatSql(expectedChecksumQuery, Optional.empty()).split("\n");
    String[] arrayActual = formatSql(checksumQuery, Optional.empty()).split("\n");
    Arrays.sort(arrayExpected);
    Arrays.sort(arrayActual);
    assertEquals(arrayActual, arrayExpected);
}
/**
 * Instantiates the plugin described by the given option; I/O and URI problems
 * are surfaced as {@code CucumberException}.
 */
Plugin create(Options.Plugin plugin) {
    try {
        return instantiate(plugin.pluginString(), plugin.pluginClass(), plugin.argument());
    } catch (IOException | URISyntaxException e) {
        throw new CucumberException(e);
    }
}
@Test
void instantiates_pretty_plugin_with_file_arg() throws IOException {
    // "pretty:<url>" selects the pretty formatter writing to the given file.
    PluginOption option = parse("pretty:" + tmp.resolve("out.txt").toUri().toURL());

    plugin = fc.create(option);

    assertThat(plugin.getClass(), is(equalTo(PrettyFormatter.class)));
}
/**
 * Folds another sample into this metric, keeping the larger of the current
 * value and {@code count}.
 */
@Entrance
public final void combine(@SourceFrom long count) {
    this.value = Math.max(this.value, count);
}
@Test
public void testSelfCombine() {
    MaxLongMetricsImpl impl = new MaxLongMetricsImpl();
    impl.combine(10);
    impl.combine(5);

    MaxLongMetricsImpl impl2 = new MaxLongMetricsImpl();
    impl2.combine(2);
    impl2.combine(6);

    // Merging another instance keeps the overall maximum across both.
    impl.combine(impl2);
    Assertions.assertEquals(10, impl.getValue());
}
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
@Test public void shouldThrowIfCanNotCoerceToBoolean() { // Given: final KsqlJsonDeserializer<Boolean> deserializer = givenDeserializerForSchema(Schema.OPTIONAL_BOOLEAN_SCHEMA, Boolean.class); final byte[] bytes = serializeJson(IntNode.valueOf(23)); // When: final Exception e = assertThrows( SerializationException.class, () -> deserializer.deserialize(SOME_TOPIC, bytes) ); // Then: assertThat(e.getCause(), (hasMessage(startsWith( "Can't convert type. sourceType: IntNode, requiredType: BOOLEAN")))); }
@Override public void error(String msg) { logger.error(msg); }
@Test
public void testErrorWithException() {
    Logger mockLogger = mock(Logger.class);
    when(mockLogger.getName()).thenReturn("foo");
    InternalLogger logger = new Slf4JLogger(mockLogger);

    logger.error("a", e);

    // The wrapper reads the delegate's name once and forwards the call verbatim.
    verify(mockLogger).getName();
    verify(mockLogger).error("a", e);
}
/**
 * Validates a table ID: it must be non-empty, no longer than
 * MAX_TABLE_ID_LENGTH characters, and contain no characters matched by
 * ILLEGAL_TABLE_CHARS.
 *
 * @throws IllegalArgumentException if the ID violates any of those rules
 */
static void checkValidTableId(String idToCheck) {
    final int length = idToCheck.length();
    if (length < MIN_TABLE_ID_LENGTH) {
        throw new IllegalArgumentException("Table ID cannot be empty. ");
    }
    if (length > MAX_TABLE_ID_LENGTH) {
        throw new IllegalArgumentException(
            "Table ID " + idToCheck + " cannot be longer than " + MAX_TABLE_ID_LENGTH + " characters.");
    }
    if (ILLEGAL_TABLE_CHARS.matcher(idToCheck).find()) {
        throw new IllegalArgumentException(
            "Table ID " + idToCheck + " is not a valid ID. Only letters, numbers, hyphens and underscores are allowed.");
    }
}
@Test
public void testCheckValidTableIdWhenIdIsTooShort() {
    // The empty string is below the minimum length and must be rejected.
    assertThrows(IllegalArgumentException.class, () -> checkValidTableId(""));
}
/**
 * Removes and returns the write result registered for the given checkpoint.
 * Fails when no result exists (start was never called, or the result was
 * already consumed).
 */
@Override
public ChannelStateWriteResult getAndRemoveWriteResult(long checkpointId) {
    LOG.debug("{} requested write result, checkpoint {}", taskName, checkpointId);
    final ChannelStateWriteResult result = results.remove(checkpointId);
    Preconditions.checkArgument(
        result != null,
        taskName + " channel state write result not found for checkpoint " + checkpointId);
    return result;
}
@Test
void testResultCompletion() throws IOException {
    ChannelStateWriteResult result;
    try (ChannelStateWriterImpl writer = openWriter()) {
        callStart(writer);
        result = writer.getAndRemoveWriteResult(CHECKPOINT_ID);
        // While the writer is open, both handle futures are still pending.
        assertThat(result.resultSubpartitionStateHandles).isNotDone();
        assertThat(result.inputChannelStateHandles).isNotDone();
    }
    // Closing the writer completes both futures.
    assertThat(result.inputChannelStateHandles).isDone();
    assertThat(result.resultSubpartitionStateHandles).isDone();
}
DateRange getRange(String dateRangeString) throws ParseException { if (dateRangeString == null || dateRangeString.isEmpty()) return null; String[] dateArr = dateRangeString.split("-"); if (dateArr.length > 2 || dateArr.length < 1) return null; // throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed"); ParsedCalendar from = parseDateString(dateArr[0]); ParsedCalendar to; if (dateArr.length == 2) to = parseDateString(dateArr[1]); else // faster and safe? // to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone()); to = parseDateString(dateArr[0]); try { return new DateRange(from, to); } catch (IllegalArgumentException ex) { return null; } }
@Test
public void testParseSingleDateRange() throws ParseException {
    // A single date spans exactly that one day.
    DateRange dateRange = dateRangeParser.getRange("2014 Sep 1");
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 31)));
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 1)));
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 30)));
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.OCTOBER, 1)));
    assertFalse(dateRange.isInRange(getCalendar(2015, Calendar.SEPTEMBER, 1)));
}
/**
 * Validates cluster-related properties, but only when clustering is enabled.
 */
@Override
public void accept(Props props) {
    if (!isClusterEnabled(props)) {
        return;
    }
    checkClusterProperties(props);
}
@Test
@UseDataProvider("validIPv4andIPv6Addresses")
public void accept_throws_MessageException_on_application_node_if_default_jdbc_url(String host) {
    TestAppSettings settings = newSettingsForAppNode(host);
    settings.clearProperty(JDBC_URL.getKey());
    ClusterSettings clusterSettings = new ClusterSettings(network);
    Props props = settings.getProps();

    // Without an explicit JDBC URL the embedded database would be used,
    // which cluster mode forbids.
    assertThatThrownBy(() -> clusterSettings.accept(props))
        .isInstanceOf(MessageException.class)
        .hasMessage("Embedded database is not supported in cluster mode");
}
/**
 * Validates a client acknowledgement value {@code h} against the current
 * bookkeeping state: the count of stanzas the client has processed so far and
 * the newest unacknowledged stanza sent by the server (if any). Delegates to
 * the static three-argument overload.
 *
 * @throws IllegalArgumentException when h is negative or exceeds 2^32 - 1
 */
private synchronized boolean validateClientAcknowledgement(long h) {
    if (h < 0) {
        throw new IllegalArgumentException("Argument 'h' cannot be negative, but was: " + h);
    }
    if (h > MASK) {
        throw new IllegalArgumentException("Argument 'h' cannot be larger than 2^32 -1, but was: " + h);
    }
    final long lastProcessedByClient = clientProcessedStanzas.get();
    final Long newestUnackedX;
    if (unacknowledgedServerStanzas.isEmpty()) {
        newestUnackedX = null;
    } else {
        newestUnackedX = unacknowledgedServerStanzas.getLast().x;
    }
    return validateClientAcknowledgement(h, lastProcessedByClient, newestUnackedX);
}
@Test public void testValidateClientAcknowledgement_rollover_edgecase4_unsent() throws Exception { // Setup test fixture. final long MAX = new BigInteger( "2" ).pow( 32 ).longValue() - 1; final long h = 0; final long oldH = MAX; final Long lastUnackedX = null; // Execute system under test. final boolean result = StreamManager.validateClientAcknowledgement(h, oldH, lastUnackedX); // Verify results. assertFalse(result); }
/**
 * Returns the URL pattern handled here: the SAML validation callback endpoint.
 */
@Override
public UrlPattern doGetPattern() {
    final String pattern = composeUrlPattern(SAML_VALIDATION_CONTROLLER, VALIDATION_CALLBACK_KEY);
    return UrlPattern.create(pattern);
}
@Test
public void do_get_pattern() {
    // Only the exact validation endpoint matches; near-misses do not.
    assertThat(underTest.doGetPattern().matches("/saml/validation")).isTrue();
    assertThat(underTest.doGetPattern().matches("/saml/validation2")).isFalse();
    assertThat(underTest.doGetPattern().matches("/api/saml/validation")).isFalse();
    assertThat(underTest.doGetPattern().matches("/saml/validation_callback2")).isFalse();
    assertThat(underTest.doGetPattern().matches("/saml/")).isFalse();
}
/**
 * Compares an actual value against an expected value.
 *
 * <p>The comparison semantics are defined by each implementation.
 *
 * @param actual the value under test
 * @param expected the value to compare against
 * @return {@code true} when the implementation considers the values to correspond
 */
public abstract boolean compare(A actual, E expected);
@Test
public void testTransforming_actual_compare_nullActualValue() {
    // A null actual value must surface as a NullPointerException.
    try {
        HYPHEN_INDEXES.compare(null, 7);
        fail("Expected NullPointerException to be thrown but wasn't");
    } catch (NullPointerException expected) {
    }
}
/**
 * Throws the supplied exception when {@code reference} is null; does nothing
 * otherwise.
 *
 * @param reference value to check
 * @param exceptionSupplierIfUnexpected supplies the exception thrown on null
 * @param <T> type of the exception to throw
 * @throws T when {@code reference} is null
 */
public static <T extends Throwable> void checkNotNull(final Object reference, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
    if (reference == null) {
        throw exceptionSupplierIfUnexpected.get();
    }
}
@Test
void assertCheckNotNullToNotThrowException() {
    // A non-null reference must pass without invoking the exception supplier.
    assertDoesNotThrow(() -> ShardingSpherePreconditions.checkNotNull(new Object(), SQLException::new));
}
/**
 * Returns the cached physical connection, obtaining a random one from the
 * database connection manager on first use.
 */
@Override
public Connection getConnection() throws SQLException {
    if (currentPhysicalConnection == null) {
        currentPhysicalConnection = connection.getDatabaseConnectionManager().getRandomConnection();
    }
    return currentPhysicalConnection;
}
@Test
void assertGetConnection() throws SQLException {
    // The metadata object must hand back its data source's connection.
    assertThat(shardingSphereDatabaseMetaData.getConnection(), is(dataSource.getConnection()));
}
/**
 * Processes the given resource class with no parent resource context.
 * Convenience overload for {@code processResource(resourceClass, null)}.
 */
public static ResourceModel processResource(final Class<?> resourceClass) {
    return processResource(resourceClass, null);
}
@Test(expectedExceptions = ResourceConfigException.class)
public void failsOnDuplicateFinderMethod() {
    // A resource declaring two finder methods with the same finder name must
    // be rejected during annotation processing.
    @RestLiCollection(name = "duplicateFinderMethod")
    class LocalClass extends CollectionResourceTemplate<Long, EmptyRecord> {
        @Finder(value = "duplicate")
        public List<EmptyRecord> findThis(@QueryParam("criteria") String criteria) {
            return Collections.emptyList();
        }

        @Finder(value = "duplicate")
        public List<EmptyRecord> findThat(@QueryParam("criteria") String criteria) {
            return Collections.emptyList();
        }
    }

    RestLiAnnotationReader.processResource(LocalClass.class);
    Assert.fail("#validateFinderMethod should fail throwing a ResourceConfigException");
}
/**
 * Opens a multipart-upload output stream for the given file. Starts the multipart
 * upload with S3 up front, then returns a stream that segments writes in memory
 * (segment size from the "s3.upload.multipart.size" host preference) and, once the
 * final multipart response is available, reports content length, ETag and version id
 * via getStatus(). ServiceExceptions from the start call are mapped to Cyberduck
 * BackgroundExceptions.
 */
@Override public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final S3Object object = new S3WriteFeature(session, acl).getDetails(file, status); // ID for the initiated multipart upload. final MultipartUpload multipart; try { final Path bucket = containerService.getContainer(file); multipart = session.getClient().multipartStartUpload( bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object); if(log.isDebugEnabled()) { log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId())); } } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Upload {0} failed", e, file); } final MultipartOutputStream proxy = new MultipartOutputStream(multipart, file, status); return new HttpResponseOutputStream<StorageObject>(new MemorySegementingOutputStream(proxy, new HostPreferences(session.getHost()).getInteger("s3.upload.multipart.size")), new S3AttributesAdapter(session.getHost()), status) { @Override public StorageObject getStatus() { if(proxy.getResponse() != null) { if(log.isDebugEnabled()) { log.debug(String.format("Received response %s", proxy.getResponse())); } object.setContentLength(proxy.getOffset()); object.setETag(proxy.getResponse().getEtag()); if(proxy.getResponse().getVersionId() != null) { object.addMetadata(S3Object.S3_VERSION_ID, proxy.getResponse().getVersionId()); } } return object; } }; }
// Round-trip test: streams 6 MiB of random bytes through the multipart write feature
// with unknown length (-1), then verifies reported status, stored attributes
// (checksum, size) and byte-for-byte content before deleting the object.
@Test public void testWrite() throws Exception { final S3MultipartWriteFeature feature = new S3MultipartWriteFeature(session, new S3AccessControlListFeature(session)); final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final TransferStatus status = new TransferStatus(); status.setLength(-1L); final Path file = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); final HttpResponseOutputStream<StorageObject> out = feature.write(file, status, new DisabledConnectionCallback()); final byte[] content = RandomUtils.nextBytes(6 * 1024 * 1024); final ByteArrayInputStream in = new ByteArrayInputStream(content); final TransferStatus progress = new TransferStatus(); final BytecountStreamListener count = new BytecountStreamListener(); new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out); assertEquals(content.length, count.getSent()); in.close(); out.close(); assertNotNull(out.getStatus()); assertEquals(content.length, out.getStatus().getContentLength()); assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(file)); final PathAttributes attr = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(file); assertEquals(status.getResponse().getChecksum(), attr.getChecksum()); assertEquals(content.length, attr.getSize()); final byte[] compare = new byte[content.length]; final InputStream stream = new S3ReadFeature(session).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); new S3DefaultDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Saves the dataset as CSV with a single response column, delegating to the
 * multi-response overload with a singleton set.
 *
 * @param csvPath destination path for the CSV file
 * @param dataset dataset to save
 * @param responseName name of the single response column
 * @throws IOException if writing the file fails
 */
public <T extends Output<T>> void save(Path csvPath, Dataset<T> dataset, String responseName) throws IOException { save(csvPath, dataset, Collections.singleton(responseName)); }
// Round-trips an empty dataset through save/load and checks the result is empty.
// NOTE(review): the temp-file suffix "csv" lacks a leading dot, so the file is named
// "foo-empty...csv" without an extension separator — harmless here but likely unintended.
@Test public void testSaveEmpty() throws IOException { MutableDataset<MockOutput> src = new MutableDataset<>(null, new MockOutputFactory()); CSVSaver saver = new CSVSaver(); Path tmp = Files.createTempFile("foo-empty","csv"); tmp.toFile().deleteOnExit(); saver.save(tmp, src, "RESPONSE"); CSVLoader<MockOutput> loader = new CSVLoader<>(new MockOutputFactory()); MutableDataset<MockOutput> tgt = loader.load(tmp, "RESPONSE"); assertEquals(0, tgt.size()); }
/**
 * Spring bean factory for the RateLimiterRegistry: creates the registry from the
 * configured properties and customizers, wires the event consumer registry, then
 * eagerly initializes the configured rate limiter instances.
 */
@Bean public RateLimiterRegistry rateLimiterRegistry( RateLimiterConfigurationProperties rateLimiterProperties, EventConsumerRegistry<RateLimiterEvent> rateLimiterEventsConsumerRegistry, RegistryEventConsumer<RateLimiter> rateLimiterRegistryEventConsumer, @Qualifier("compositeRateLimiterCustomizer") CompositeCustomizer<RateLimiterConfigCustomizer> compositeRateLimiterCustomizer) { RateLimiterRegistry rateLimiterRegistry = createRateLimiterRegistry(rateLimiterProperties, rateLimiterRegistryEventConsumer, compositeRateLimiterCustomizer); registerEventConsumer(rateLimiterRegistry, rateLimiterEventsConsumerRegistry, rateLimiterProperties); initRateLimiterRegistry(rateLimiterProperties, compositeRateLimiterCustomizer, rateLimiterRegistry); return rateLimiterRegistry; }
// Builds a registry from two instance configurations (limits 2 and 4, events enabled),
// then verifies aspect order, instance count, per-instance limitForPeriod, and that
// one event consumer was registered per subscribed instance.
@Test public void testRateLimiterRegistry() { io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties1 = new io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties(); instanceProperties1.setLimitForPeriod(2); instanceProperties1.setSubscribeForEvents(true); io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties2 = new io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties(); instanceProperties2.setLimitForPeriod(4); instanceProperties2.setSubscribeForEvents(true); RateLimiterConfigurationProperties rateLimiterConfigurationProperties = new RateLimiterConfigurationProperties(); rateLimiterConfigurationProperties.getInstances().put("backend1", instanceProperties1); rateLimiterConfigurationProperties.getInstances().put("backend2", instanceProperties2); rateLimiterConfigurationProperties.setRateLimiterAspectOrder(300); RateLimiterConfiguration rateLimiterConfiguration = new RateLimiterConfiguration(); DefaultEventConsumerRegistry<RateLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>(); RateLimiterRegistry rateLimiterRegistry = rateLimiterConfiguration .rateLimiterRegistry(rateLimiterConfigurationProperties, eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeRateLimiterCustomizerTest()); assertThat(rateLimiterConfigurationProperties.getRateLimiterAspectOrder()).isEqualTo(300); assertThat(rateLimiterRegistry.getAllRateLimiters().size()).isEqualTo(2); RateLimiter rateLimiter = rateLimiterRegistry.rateLimiter("backend1"); assertThat(rateLimiter).isNotNull(); assertThat(rateLimiter.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(2); RateLimiter rateLimiter2 = rateLimiterRegistry.rateLimiter("backend2"); 
assertThat(rateLimiter2).isNotNull(); assertThat(rateLimiter2.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(4); assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2); }
/**
 * Garbage-collects a deletable extension: deletes its backing store record and drops
 * its index entry. Returns null — presumably meaning "no requeue" in this reconciler
 * contract; confirm against the Reconciler interface.
 */
@Override public Result reconcile(GcRequest request) { log.debug("Extension {} is being deleted", request); client.fetch(request.gvk(), request.name()) .filter(deletable()) .ifPresent(extension -> { var extensionStore = converter.convertTo(extension); storeClient.delete(extensionStore.getName(), extensionStore.getVersion()); // drop index for this extension var indexer = indexerFactory.getIndexer(extension.groupVersionKind()); indexer.unIndexRecord(request.name()); log.debug("Extension {} was deleted", request); }); return null; }
// Verifies the GC reconciler deletes the store record (by name and version) and
// un-indexes the extension when it carries a deletion timestamp and no finalizers.
@Test void shouldDeleteCorrectly() { var fake = createExtension(); fake.getMetadata().setDeletionTimestamp(Instant.now()); fake.getMetadata().setFinalizers(null); when(client.fetch(fake.groupVersionKind(), fake.getMetadata().getName())) .thenReturn(Optional.of(convertTo(fake))); ExtensionStore store = new ExtensionStore(); store.setName("fake-store-name"); store.setVersion(1L); when(converter.convertTo(any())).thenReturn(store); var indexer = mock(Indexer.class); when(indexerFactory.getIndexer(any())).thenReturn(indexer); var result = reconciler.reconcile(createGcRequest()); assertNull(result); verify(converter).convertTo(any()); verify(storeClient).delete("fake-store-name", 1L); verify(indexer).unIndexRecord(eq(fake.getMetadata().getName())); }
/**
 * Builds the configuration fragment to drop for a DROP ENCRYPT RULE statement:
 * one empty table rule per dropped table plus every encryptor that becomes unused
 * after the drop. Note dropRule(each) mutates the current rule state as a side
 * effect while iterating, before the unused-encryptor scan runs.
 */
@Override public EncryptRuleConfiguration buildToBeDroppedRuleConfiguration(final DropEncryptRuleStatement sqlStatement) { Collection<EncryptTableRuleConfiguration> toBeDroppedTables = new LinkedList<>(); Map<String, AlgorithmConfiguration> toBeDroppedEncryptors = new HashMap<>(); for (String each : sqlStatement.getTables()) { toBeDroppedTables.add(new EncryptTableRuleConfiguration(each, Collections.emptyList())); dropRule(each); } UnusedAlgorithmFinder.findUnusedEncryptor(rule.getConfiguration()).forEach(each -> toBeDroppedEncryptors.put(each, rule.getConfiguration().getEncryptors().get(each))); return new EncryptRuleConfiguration(toBeDroppedTables, toBeDroppedEncryptors); }
// Dropping one of several tables that share an encryptor must drop the table rule
// but keep the still-in-use encryptor (empty encryptors map in the drop fragment).
@Test void assertUpdateCurrentRuleConfigurationWithInUsedEncryptor() { EncryptRuleConfiguration ruleConfig = createCurrentRuleConfigurationWithMultipleTableRules(); EncryptRule rule = mock(EncryptRule.class); when(rule.getConfiguration()).thenReturn(ruleConfig); executor.setRule(rule); EncryptRuleConfiguration toBeDroppedRuleConfig = executor.buildToBeDroppedRuleConfiguration(createSQLStatement("T_ENCRYPT")); assertThat(toBeDroppedRuleConfig.getTables().size(), is(1)); assertTrue(toBeDroppedRuleConfig.getEncryptors().isEmpty()); }
/**
 * Lists the directory using the default path delimiter, delegating to the
 * three-argument overload.
 */
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { return this.list(directory, listener, String.valueOf(Path.DELIMITER)); }
// Against a MinIO endpoint (virtual-host addressing disabled), listing a folder that
// does not exist inside a freshly created bucket must raise NotfoundException;
// the bucket is deleted in the finally block regardless of outcome.
@Test(expected = NotfoundException.class) public void testListNotFoundFolderMinio() throws Exception { final Host host = new Host(new S3Protocol(), "play.min.io", new Credentials( "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" )) { @Override public String getProperty(final String key) { if("s3.bucket.virtualhost.disable".equals(key)) { return String.valueOf(true); } return super.getProperty(key); } }; final S3Session session = new S3Session(host); final LoginConnectionService login = new LoginConnectionService(new DisabledLoginCallback(), new DisabledHostKeyCallback(), new DisabledPasswordStore(), new DisabledProgressListener()); login.check(session, new DisabledCancelCallback()); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final Path directory = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir( new Path(new DefaultHomeFinderService(session).find(), new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); try { new S3ObjectListService(session, acl).list(new Path(directory, new AsciiRandomStringService(30).random(), EnumSet.of(Path.Type.directory)), new DisabledListProgressListener()); } finally { new S3DefaultDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback()); } }
public void check( List<CheckResultInterface> remarks, TransMeta transMeta, StepMeta stepMeta, RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, Repository repository, IMetaStore metaStore ) { super.check( remarks, transMeta, stepMeta, prev, input, output, info, space, repository, metaStore ); CheckResult cr; // See if we get input... if ( input != null && input.length > 0 ) { cr = new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, "SalesforceUpsertMeta.CheckResult.NoInputExpected" ), stepMeta ); } else { cr = new CheckResult( CheckResult.TYPE_RESULT_OK, BaseMessages.getString( PKG, "SalesforceUpsertMeta.CheckResult.NoInput" ), stepMeta ); } remarks.add( cr ); // check return fields if ( getUpdateLookup().length == 0 ) { cr = new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, "SalesforceUpsertMeta.CheckResult.NoFields" ), stepMeta ); } else { cr = new CheckResult( CheckResult.TYPE_RESULT_OK, BaseMessages.getString( PKG, "SalesforceUpsertMeta.CheckResult.FieldsOk" ), stepMeta ); } remarks.add( cr ); }
// First pass: default meta (no update lookup fields) must produce an error remark.
// Second pass: with a lookup/stream field configured, no error remark is expected.
@Test public void testCheck() { SalesforceUpsertMeta meta = new SalesforceUpsertMeta(); meta.setDefault(); List<CheckResultInterface> remarks = new ArrayList<CheckResultInterface>(); meta.check( remarks, null, null, null, null, null, null, null, null, null ); boolean hasError = false; for ( CheckResultInterface cr : remarks ) { if ( cr.getType() == CheckResult.TYPE_RESULT_ERROR ) { hasError = true; } } assertFalse( remarks.isEmpty() ); assertTrue( hasError ); remarks.clear(); meta.setDefault(); meta.setUsername( "user" ); meta.setUpdateLookup( new String[]{ "SalesforceField" } ); meta.setUpdateStream( new String[]{ "StreamField" } ); meta.setUseExternalId( new Boolean[]{ false } ); meta.check( remarks, null, null, null, null, null, null, null, null, null ); hasError = false; for ( CheckResultInterface cr : remarks ) { if ( cr.getType() == CheckResult.TYPE_RESULT_ERROR ) { hasError = true; } } assertFalse( remarks.isEmpty() ); assertFalse( hasError ); }
/**
 * One iteration of the sender loop. When transactional/idempotent, first resolves
 * sequences and handles fatal/abortable transaction states (a fatal state aborts
 * batches and only polls; an abortable authorization error returns early), bumps the
 * idempotent epoch if a new producer id is needed, and may send a transactional
 * request instead of produce data. Otherwise drains accumulated batches and polls
 * the network client. AuthenticationException is trace-logged here only for cleanup;
 * it was already logged as an error upstream.
 */
void runOnce() { if (transactionManager != null) { try { transactionManager.maybeResolveSequences(); RuntimeException lastError = transactionManager.lastError(); // do not continue sending if the transaction manager is in a failed state if (transactionManager.hasFatalError()) { if (lastError != null) maybeAbortBatches(lastError); client.poll(retryBackoffMs, time.milliseconds()); return; } if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) { return; } // Check whether we need a new producerId. If so, we will enqueue an InitProducerId // request which will be sent below transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); if (maybeSendAndPollTransactionalRequest()) { return; } } catch (AuthenticationException e) { // This is already logged as error, but propagated here to perform any clean ups. log.trace("Authentication exception while processing transactional request", e); transactionManager.authenticationFailed(e); } } long currentTimeMs = time.milliseconds(); long pollTimeout = sendProducerData(currentTimeMs); client.poll(pollTimeout, currentTimeMs); }
// With two in-flight idempotent batches, a fatal MESSAGE_TOO_LARGE on the first must
// fail its future and renumber the second batch's sequence; the subsequent
// OUT_OF_ORDER_SEQUENCE response triggers a retry that then succeeds at sequence 0.
@Test public void testIdempotenceWithMultipleInflightsWhereFirstFailsFatallyAndSequenceOfFutureBatchesIsAdjusted() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); assertEquals(0, transactionManager.sequenceNumber(tp0)); // Send first ProduceRequest Future<RecordMetadata> request1 = appendToAccumulator(tp0); sender.runOnce(); String nodeId = client.requests().peek().destination(); Node node = new Node(Integer.parseInt(nodeId), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertEquals(1, transactionManager.sequenceNumber(tp0)); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); // Send second ProduceRequest Future<RecordMetadata> request2 = appendToAccumulator(tp0); sender.runOnce(); assertEquals(2, client.inFlightRequestCount()); assertEquals(2, transactionManager.sequenceNumber(tp0)); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); assertFalse(request1.isDone()); assertFalse(request2.isDone()); assertTrue(client.isReady(node, time.milliseconds())); sendIdempotentProducerResponse(0, tp0, Errors.MESSAGE_TOO_LARGE, -1L); sender.runOnce(); // receive response 0, should adjust sequences of future batches. 
assertFutureFailure(request1, RecordTooLargeException.class); assertEquals(1, client.inFlightRequestCount()); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); sendIdempotentProducerResponse(1, tp0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1L); sender.runOnce(); // receive response 1 assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); assertEquals(0, client.inFlightRequestCount()); sender.runOnce(); // resend request 1 assertEquals(1, client.inFlightRequestCount()); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L); sender.runOnce(); // receive response 1 assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0)); assertEquals(0, client.inFlightRequestCount()); assertTrue(request1.isDone()); assertEquals(0, request2.get().offset()); }
/**
 * Reroutes the record to a new topic when its current topic matches the configured
 * regex; otherwise returns the record unchanged. Only the topic changes — partition,
 * schemas, key, value, and timestamp are carried over verbatim.
 */
@Override
public R apply(R record) {
    final Matcher matcher = regex.matcher(record.topic());
    if (!matcher.matches()) {
        log.trace("Not rerouting topic '{}' as it does not match the configured regex", record.topic());
        return record;
    }
    final String updatedTopic = matcher.replaceFirst(replacement);
    log.trace("Rerouting from topic '{}' to new topic '{}'", record.topic(), updatedTopic);
    return record.newRecord(updatedTopic, record.kafkaPartition(), record.keySchema(), record.key(),
            record.valueSchema(), record.value(), record.timestamp());
}
// Verifies regex rerouting can slice a date suffix off a topic name via group capture.
@Test public void slice() { assertEquals("index", apply("(.*)-(\\d\\d\\d\\d\\d\\d\\d\\d)", "$1", "index-20160117")); }
/**
 * Routes the authentication request to AD based on the SAML session state:
 * cancels when validation already failed, uses the BvD return URL (after
 * BvD-specific preparation) when the requester is the configured BvD entity,
 * and otherwise uses the standard artifact-redirect return URL.
 *
 * @return the redirect URL to send the user agent to
 */
public String redirectWithCorrectAttributesForAd(HttpServletRequest httpRequest, AuthenticationRequest authenticationRequest) throws UnsupportedEncodingException, SamlSessionException { SamlSession samlSession = authenticationRequest.getSamlSession(); if (samlSession.getValidationStatus() != null && samlSession.getValidationStatus().equals(STATUS_INVALID.label)) { return cancelAuthenticationToAd(authenticationRequest, samlSession.getArtifact()); } else if (samlSession.getRequesterId() != null && samlSession.getRequesterId().equals(bvdEntityId)) { prepareAuthenticationToAdForBvd(samlSession); String bvdReturnUrl = generateReturnUrl(httpRequest, samlSession.getArtifact(), ENTRANCE_RETURN_URL_BVD); logger.info("Prepare authentication to Ad for Bvd"); return prepareAuthenticationToAd(bvdReturnUrl, authenticationRequest); } else { String adReturnUrl = generateReturnUrl(httpRequest, samlSession.getArtifact(), REDIRECT_WITH_ARTIFACT_URL); return prepareAuthenticationToAd(adReturnUrl, authenticationRequest); } }
// BvD branch: when the session's requester is the BvD entity, the redirect must use
// the front-channel URL, set a transaction id, and mark authentication successful.
@Test public void redirectWithCorrectAttributesToAdForBvDTest() throws UnsupportedEncodingException, SamlSessionException { AuthenticationRequest authenticationRequest = new AuthenticationRequest(); authenticationRequest.setRequest(httpServletRequestMock); SamlSession samlSession = new SamlSession(1L); samlSession.setRequesterId(bvdEntity); samlSession.setArtifact("artifact"); authenticationRequest.setSamlSession(samlSession); String result = authenticationEntranceService.redirectWithCorrectAttributesForAd(httpServletRequestMock, authenticationRequest); assertNotNull(result); assertNotNull(samlSession.getTransactionId()); assertEquals(AdAuthenticationStatus.STATUS_SUCCESS.label, authenticationRequest.getSamlSession().getAuthenticationStatus()); assertEquals(result, frontChannel); }
/**
 * Converts the value to a Number, returning {@code defaultValue} when conversion
 * fails (delegates to the quiet converter, which does not throw).
 */
public static Number toNumber(Object value, Number defaultValue) { return convertQuietly(Number.class, value, defaultValue); }
// An empty string cannot be parsed as a number and must convert to null.
@Test public void emptyToNumberTest() { final Object a = ""; final Number number = Convert.toNumber(a); assertNull(number); }
/**
 * Renders the user-supplied Slack message template against the event's backlog model.
 *
 * @param ctx the event notification context
 * @param config the Slack notification configuration
 * @param template the custom message template to render
 * @return the rendered message
 * @throws PermanentEventNotificationException if the template is invalid
 */
String buildCustomMessage(EventNotificationContext ctx, SlackEventNotificationConfig config, String template) throws PermanentEventNotificationException {
    final List<MessageSummary> backlog = getMessageBacklog(ctx, config);
    Map<String, Object> model = getCustomMessageModel(ctx, config.type(), backlog, config.timeZone());
    try {
        LOG.debug("customMessage: template = {} model = {}", template, model);
        return templateEngine.transform(template, model);
    } catch (Exception e) {
        String error = "Invalid Custom Message template.";
        LOG.error(error + "[{}]", e.toString());
        // Pass the caught exception itself as the cause. The previous code used
        // e.getCause(), which is often null and dropped the real stack trace.
        throw new PermanentEventNotificationException(error + e, e);
    }
}
// An unparseable template placeholder must surface as PermanentEventNotificationException.
@Test(expected = PermanentEventNotificationException.class) public void buildCustomMessageWithInvalidTemplate() throws EventNotificationException { slackEventNotificationConfig = buildInvalidTemplate(); slackEventNotification.buildCustomMessage(eventNotificationContext, slackEventNotificationConfig, "Title: ${does't exist}"); }
/**
 * Serializes an Instant as a JSON string rendered in the JVM's default time zone.
 * NOTE(review): using ZoneId.systemDefault() makes the output environment-dependent;
 * confirm callers expect local-zone rendering rather than a fixed offset.
 */
@Override public JsonElement serialize(Instant t, Type type, JsonSerializationContext jsc) { return new JsonPrimitive(serializeToString(t, ZoneId.systemDefault())); }
// Pins the ISO-8601 rendering of a known epoch millisecond at UTC+8.
@Test public void testSerialize() { assertEquals( "2024-02-13T15:11:06+08:00", InstantTypeAdapter.serializeToString(Instant.ofEpochMilli(1707808266154L), ZoneOffset.ofHours(8)) ); }
/**
 * Tests the given path for case-insensitive equality with this predicate's path,
 * by wrapping it in a new CaseInsensitivePathPredicate and comparing via equals().
 */
@Override public boolean test(final Path test) { return this.equals(new CaseInsensitivePathPredicate(test)); }
// Equality and hash code must ignore case ("/f" vs "/F") but still distinguish
// different paths and different path types (file vs directory).
@Test public void testPredicateTest() { final Path t = new Path("/f", EnumSet.of(Path.Type.file)); assertTrue(new CaseInsensitivePathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.file)))); assertEquals(new CaseInsensitivePathPredicate(t).hashCode(), new CaseInsensitivePathPredicate(new Path("/f", EnumSet.of(Path.Type.file))).hashCode()); assertTrue(new CaseInsensitivePathPredicate(t).test(new Path("/F", EnumSet.of(Path.Type.file)))); assertEquals(new CaseInsensitivePathPredicate(t).hashCode(), new CaseInsensitivePathPredicate(new Path("/F", EnumSet.of(Path.Type.file))).hashCode()); assertFalse(new CaseInsensitivePathPredicate(t).test(new Path("/f/a", EnumSet.of(Path.Type.file)))); assertFalse(new CaseInsensitivePathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.directory)))); }
/**
 * OAuth2 callback filter entry point. Resolves the identity provider from the
 * callback path; the resolver itself writes the error response when no provider
 * matches, in which case this method does nothing further.
 */
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) {
    final IdentityProvider provider = resolveProviderOrHandleResponse(request, response, CALLBACK_PATH);
    if (provider == null) {
        return;
    }
    handleProvider(request, response, provider);
}
// When the provider throws UnauthorizedException, the filter must redirect to the
// unauthorized page, record a login-failure authentication event with the public
// message, delete the OAuth redirection state, and set the URL-encoded
// AUTHENTICATION-ERROR cookie with a 5-minute lifetime.
@Test public void redirect_when_failing_because_of_UnauthorizedExceptionException() throws Exception { FailWithUnauthorizedExceptionIdProvider identityProvider = new FailWithUnauthorizedExceptionIdProvider(); when(request.getRequestURI()).thenReturn("/oauth2/callback/" + identityProvider.getKey()); identityProviderRepository.addIdentityProvider(identityProvider); underTest.doFilter(request, response, chain); verify(response).sendRedirect("/sessions/unauthorized"); verify(authenticationEvent).loginFailure(eq(request), authenticationExceptionCaptor.capture()); AuthenticationException authenticationException = authenticationExceptionCaptor.getValue(); assertThat(authenticationException).hasMessage("Email john@email.com is already used"); assertThat(authenticationException.getSource()).isEqualTo(Source.oauth2(identityProvider)); assertThat(authenticationException.getLogin()).isNull(); assertThat(authenticationException.getPublicMessage()).isEqualTo("Email john@email.com is already used"); verify(oAuthRedirection).delete(request, response); verify(response).addCookie(cookieArgumentCaptor.capture()); Cookie cookie = cookieArgumentCaptor.getValue(); assertThat(cookie.getName()).isEqualTo("AUTHENTICATION-ERROR"); assertThat(cookie.getValue()).isEqualTo("Email%20john%40email.com%20is%20already%20used"); assertThat(cookie.getPath()).isEqualTo("/"); assertThat(cookie.isHttpOnly()).isFalse(); assertThat(cookie.getMaxAge()).isEqualTo(300); assertThat(cookie.isSecure()).isFalse(); }
/**
 * Returns the human-readable name of this analyzer.
 */
@Override public String getName() { return "Poetry Analyzer"; }
// Analyzes a poetry.lock fixture and verifies the dependency count plus the
// version, display name and ecosystem of the urllib3 entry.
@Test public void testPoetryLock() throws AnalysisException {
    final Dependency result = new Dependency(BaseTest.getResourceAsFile(this, "poetry.lock"));
    analyzer.analyze(result, engine);
    assertEquals(88, engine.getDependencies().length);
    boolean found = false;
    for (Dependency d : engine.getDependencies()) {
        if ("urllib3".equals(d.getName())) {
            found = true;
            assertEquals("1.26.12", d.getVersion());
            assertThat(d.getDisplayFileName(), equalTo("urllib3:1.26.12"));
            assertEquals(PythonDistributionAnalyzer.DEPENDENCY_ECOSYSTEM, d.getEcosystem());
        }
    }
    // Fixed failure message: it had a typo ("Expeced") and named PyYAML although
    // the loop actually searches for urllib3.
    assertTrue("Expected to find urllib3", found);
}
/**
 * Shuts down the executor: closes the KSQL engine first, then the service context.
 * Each close failure is logged and swallowed deliberately so one failing component
 * does not prevent the other from shutting down.
 */
@Override public void shutdown() { try { ksqlEngine.close(); } catch (final Exception e) { log.warn("Failed to cleanly shutdown the KSQL Engine", e); } try { serviceContext.close(); } catch (final Exception e) { log.warn("Failed to cleanly shutdown services", e); } }
// Shutdown must close the KSQL engine.
@Test public void shouldCloseEngineOnStop() { // When: standaloneExecutor.shutdown(); // Then: verify(ksqlEngine).close(); }
/**
 * Looks up (and optionally creates) the topic, delegating to the three-argument
 * overload with no additional properties.
 */
public CompletableFuture<Optional<Topic>> getTopic(final String topic, boolean createIfMissing) { return getTopic(topic, createIfMissing, null); }
// Creates a topic, closes its PersistentTopic, verifies the broker's topic cache is
// empty (ignoring the namespace-events system topic), then re-activates it with a
// subscriber.
@Test public void testGetTopic() throws Exception {
    final String ns = "prop/ns-test";
    admin.namespaces().createNamespace(ns, 2);
    final String topicName = ns + "/topic-1";
    admin.topics().createNonPartitionedTopic(String.format("persistent://%s", topicName));
    Producer<String> producer1 = pulsarClient.newProducer(Schema.STRING).topic(topicName).create();
    producer1.close();
    // topicName is already a String — removed the redundant toString() call.
    PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).get().get();
    persistentTopic.close().join();
    List<String> topics = new ArrayList<>(pulsar.getBrokerService().getTopics().keys());
    topics.removeIf(item -> item.contains(SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME));
    Assert.assertEquals(topics.size(), 0);
    @Cleanup
    Consumer<String> consumer = pulsarClient.newConsumer(Schema.STRING)
            .topic(topicName)
            .subscriptionName("sub-1")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscriptionType(SubscriptionType.Shared)
            .subscribe();
}
/**
 * Prints the entities in the configured output format: a single JSON document for
 * JSON output, or one table per entity for tabular output (prefixing each table with
 * its statement text when more than one entity is printed). Any other format value
 * is a programming error and throws.
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) { switch (outputFormat) { case JSON: printAsJson(entityList); break; case TABULAR: final boolean showStatements = entityList.size() > 1; for (final KsqlEntity ksqlEntity : entityList) { writer().println(); if (showStatements) { writer().println(ksqlEntity.getStatementText()); } printAsTable(ksqlEntity); } break; default: throw new RuntimeException(String.format( "Unexpected output format: '%s'", outputFormat.name() )); } }
// Builds a fully populated extended source description (read/write queries, offset
// summaries per consumer group, cluster stats) and approval-tests the rendered
// console output against a stored snapshot.
@Test public void shouldPrintTopicDescribeExtended() { // Given: final List<RunningQuery> readQueries = ImmutableList.of( new RunningQuery("read query", ImmutableSet.of("sink1"), ImmutableSet.of("sink1 topic"), new QueryId("readId"), queryStatusCount, KsqlConstants.KsqlQueryType.PERSISTENT) ); final List<RunningQuery> writeQueries = ImmutableList.of( new RunningQuery("write query", ImmutableSet.of("sink2"), ImmutableSet.of("sink2 topic"), new QueryId("writeId"), queryStatusCount, KsqlConstants.KsqlQueryType.PERSISTENT) ); final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of( new SourceDescriptionEntity( "e", new SourceDescription( "TestSource", Optional.empty(), readQueries, writeQueries, buildTestSchema(SqlTypes.STRING), DataSourceType.KTABLE.getKsqlType(), "2000-01-01", "stats", "errors", true, "json", "avro", "kafka-topic", 2, 1, "sql statement text", ImmutableList.of( new QueryOffsetSummary( "consumer1", ImmutableList.of( new QueryTopicOffsetSummary( "kafka-topic", ImmutableList.of( new ConsumerPartitionOffsets(0, 100, 900, 800), new ConsumerPartitionOffsets(1, 50, 900, 900) )), new QueryTopicOffsetSummary( "kafka-topic-2", ImmutableList.of( new ConsumerPartitionOffsets(0, 0, 90, 80), new ConsumerPartitionOffsets(1, 10, 90, 90) )) )), new QueryOffsetSummary( "consumer2", ImmutableList.of()) ), ImmutableList.of("S1", "S2")), Collections.emptyList() )) ); // When: console.printKsqlEntityList(entityList); // Then: final String output = terminal.getOutputString(); Approvals.verify(output, approvalOptions); }
/**
 * Returns the sub-directory targeted by this deletion task.
 */
public Path getSubDir() { return this.subDir; }
// The task must expose the sub-directory it was constructed with.
@Test public void testGetSubDir() throws Exception { assertEquals(SUBDIR, deletionTask.getSubDir()); }
protected void setRequestPropertiesWithHeaderInfo(Map<String, String> headerMap, Object request) { // Try to obtain the unique name of the target service from the headerMap. // Due to the MOSN routing logic, it may be different from the original service unique name. if (request instanceof SofaRequest) { String headerService = headerMap.get(RemotingConstants.HEAD_SERVICE); if (headerService == null) { headerService = headerMap.get(RemotingConstants.HEAD_TARGET_SERVICE); } if (StringUtils.isNotBlank(headerService)) { ((SofaRequest) request).setTargetServiceUniqueName(headerService); } } }
// Verifies header propagation: HEAD_TARGET_SERVICE is used as a fallback, and
// HEAD_SERVICE takes precedence once present.
@Test public void testSetRequestPropertiesWithHeaderInfo() {
    String service1 = "testService1";
    String service2 = "testService2";
    // Fixed raw-type usage: declare the interface type and use the diamond operator.
    Map<String, String> headerMap = new HashMap<>();
    headerMap.put(RemotingConstants.HEAD_TARGET_SERVICE, service1);
    SofaRequest sofaRequest = new SofaRequest();
    SofaRpcSerialization sofaRpcSerialization = new SofaRpcSerialization();
    sofaRpcSerialization.setRequestPropertiesWithHeaderInfo(headerMap, sofaRequest);
    Assert.assertEquals(service1, sofaRequest.getTargetServiceUniqueName());
    headerMap.put(RemotingConstants.HEAD_SERVICE, service2);
    sofaRpcSerialization.setRequestPropertiesWithHeaderInfo(headerMap, sofaRequest);
    Assert.assertEquals(service2, sofaRequest.getTargetServiceUniqueName());
}
/**
 * Generates a "$2a$" bcrypt salt string with the given cost factor.
 *
 * @param log_rounds log2 of the number of hashing rounds; must lie within
 *        [MIN_LOG_ROUNDS, MAX_LOG_ROUNDS]
 * @param random source of randomness for the salt bytes
 * @return the encoded salt, e.g. {@code $2a$10$...}
 * @throws IllegalArgumentException if log_rounds is out of range
 */
public static String gensalt(int log_rounds, SecureRandom random) {
    if (log_rounds < MIN_LOG_ROUNDS || log_rounds > MAX_LOG_ROUNDS) {
        throw new IllegalArgumentException("Bad number of rounds");
    }
    StringBuilder rs = new StringBuilder();
    // Java-style array declaration instead of the C-style "byte rnd[]".
    byte[] rnd = new byte[BCRYPT_SALT_LEN];
    random.nextBytes(rnd);
    rs.append("$2a$");
    if (log_rounds < 10) {
        // Zero-pad the cost so the salt always has a two-digit round count.
        rs.append("0");
    }
    rs.append(log_rounds);
    rs.append("$");
    encode_base64(rnd, rnd.length, rs);
    return rs.toString();
}
// A cost factor below the minimum must be rejected with IllegalArgumentException.
@Test public void testGensaltTooLittleRounds() throws IllegalArgumentException { thrown.expect(IllegalArgumentException.class); BCrypt.gensalt(3); }
/**
 * HTTP endpoint that adds or updates labels on a terminated execution.
 * Returns 404 when the execution id is unknown (within the resolved tenant) and
 * 400 when the execution has not yet reached a terminal state; otherwise returns
 * the updated execution.
 */
@ExecuteOn(TaskExecutors.IO) @Post(uri = "/{executionId}/labels") @Operation(tags = {"Executions"}, summary = "Add or update labels of a terminated execution") @ApiResponse(responseCode = "404", description = "If the execution cannot be found") @ApiResponse(responseCode = "400", description = "If the execution is not terminated") public HttpResponse<?> setLabels( @Parameter(description = "The execution id") @PathVariable String executionId, @Parameter(description = "The labels to add to the execution") @Body @NotNull @Valid List<Label> labels ) { Optional<Execution> maybeExecution = executionRepository.findById(tenantService.resolveTenant(), executionId); if (maybeExecution.isEmpty()) { return HttpResponse.notFound(); } Execution execution = maybeExecution.get(); if (!execution.getState().getCurrent().isTerminated()) { return HttpResponse.badRequest("The execution is not terminated"); } Execution newExecution = setLabels(execution, labels); return HttpResponse.ok(newExecution); }
// Covers the three endpoint paths: successful label update on a terminated
// execution, 404 for an unknown execution id, and 422 for an invalid (null) label.
@Test void setLabels() { // update label on a terminated execution Execution result = triggerInputsFlowExecution(true); assertThat(result.getState().getCurrent(), is(State.Type.SUCCESS)); Execution response = client.toBlocking().retrieve( HttpRequest.POST("/api/v1/executions/" + result.getId() + "/labels", List.of(new Label("key", "value"))), Execution.class ); assertThat(response.getLabels(), hasItem(new Label("key", "value"))); // update label on a not found execution var exception = assertThrows( HttpClientResponseException.class, () -> client.toBlocking().exchange(HttpRequest.POST("/api/v1/executions/notfound/labels", List.of(new Label("key", "value")))) ); assertThat(exception.getStatus(), is(HttpStatus.NOT_FOUND)); exception = assertThrows( HttpClientResponseException.class, () -> client.toBlocking().exchange(HttpRequest.POST("/api/v1/executions/" + result.getId() + "/labels", List.of(new Label(null, null)))) ); assertThat(exception.getStatus(), is(HttpStatus.UNPROCESSABLE_ENTITY)); }
/**
 * Renders one access-log line in Apache "combined" style for a request handled by the
 * AWS serverless container, pulling identity and timing details from the API Gateway
 * (REST) or HTTP API (v2) proxy request context when available.
 *
 * NOTE(review): the "%l" section appends principal-or-dash AND userArn-or-dash with no
 * separator between them, and the line ends with the literal word "combined" — both look
 * intentional per the format comment below, but confirm against the upstream format spec.
 */
@Override
@SuppressFBWarnings({ "SERVLET_HEADER_REFERER", "SERVLET_HEADER_USER_AGENT" })
public String format(ContainerRequestType servletRequest, ContainerResponseType servletResponse, SecurityContext ctx) {
    //LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
    StringBuilder logLineBuilder = new StringBuilder();
    AwsProxyRequestContext gatewayContext = (AwsProxyRequestContext)servletRequest.getAttribute(API_GATEWAY_CONTEXT_PROPERTY);
    HttpApiV2ProxyRequestContext httpApiContext = (HttpApiV2ProxyRequestContext)servletRequest.getAttribute(HTTP_API_CONTEXT_PROPERTY);

    // %h — remote client address
    logLineBuilder.append(servletRequest.getRemoteAddr());
    logLineBuilder.append(" ");

    // %l — principal name (or "-") followed immediately by the gateway user ARN (or "-")
    if (servletRequest.getUserPrincipal() != null) {
        logLineBuilder.append(servletRequest.getUserPrincipal().getName());
    } else {
        logLineBuilder.append("-");
    }
    if (gatewayContext != null && gatewayContext.getIdentity() != null && gatewayContext.getIdentity().getUserArn() != null) {
        logLineBuilder.append(gatewayContext.getIdentity().getUserArn());
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append(" ");

    // %u — authenticated user name; left empty (not "-") when there is no principal
    if (servletRequest.getUserPrincipal() != null) {
        logLineBuilder.append(servletRequest.getUserPrincipal().getName());
    }
    logLineBuilder.append(" ");

    // %t — request time: prefer the proxy context's epoch (millis -> seconds),
    // falling back to "now" from the injected clock
    long timeEpoch = ZonedDateTime.now(clock).toEpochSecond();
    if (gatewayContext != null && gatewayContext.getRequestTimeEpoch() > 0) {
        timeEpoch = gatewayContext.getRequestTimeEpoch() / 1000;
    } else if (httpApiContext != null && httpApiContext.getTimeEpoch() > 0) {
        timeEpoch = httpApiContext.getTimeEpoch() / 1000;
    }
    logLineBuilder.append(
            dateFormat.format(ZonedDateTime.of(
                    LocalDateTime.ofEpochSecond(timeEpoch, 0, ZoneOffset.UTC), clock.getZone())
            ));
    logLineBuilder.append(" ");

    // %r — request line: METHOD URI PROTOCOL, quoted
    logLineBuilder.append("\"");
    logLineBuilder.append(servletRequest.getMethod().toUpperCase(Locale.ENGLISH));
    logLineBuilder.append(" ");
    logLineBuilder.append(servletRequest.getRequestURI());
    logLineBuilder.append(" ");
    logLineBuilder.append(servletRequest.getProtocol());
    logLineBuilder.append("\" ");

    // %>s — response status code
    logLineBuilder.append(servletResponse.getStatus());
    logLineBuilder.append(" ");

    // %b — response body size in bytes, "-" when unknown or empty
    if (servletResponse instanceof AwsHttpServletResponse) {
        AwsHttpServletResponse awsResponse = (AwsHttpServletResponse)servletResponse;
        if (awsResponse.getAwsResponseBodyBytes().length > 0) {
            logLineBuilder.append(awsResponse.getAwsResponseBodyBytes().length);
        } else {
            logLineBuilder.append("-");
        }
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append(" ");

    // \"%{Referer}i\"
    logLineBuilder.append("\"");
    if (servletRequest.getHeader("referer") != null) {
        logLineBuilder.append(servletRequest.getHeader("referer"));
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append("\" ");

    // \"%{User-agent}i\"
    logLineBuilder.append("\"");
    if (servletRequest.getHeader("user-agent") != null) {
        logLineBuilder.append(servletRequest.getHeader("user-agent"));
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append("\" ");

    logLineBuilder.append("combined");

    return logLineBuilder.toString();
}
// With no proxy request context, %t must fall back to the injected clock's "now".
@Test
void logsCurrentTimeWhenContextNull() {
    // given
    proxyRequest.setRequestContext(null);

    // when
    String actual = sut.format(mockServletRequest, mockServletResponse, null);

    // then
    assertThat(actual, containsString("[07/02/1991:01:02:03Z]"));
}
/**
 * Returns the identifier of this request.
 *
 * @return the current request id field value
 */
public String getRequestId() {
    return requestId;
}
// The default request id must be a valid UUID, and the setter round-trips.
@Test
public void testGetRequestId() {
    String requestId = requestContext.getRequestId();
    assertNotNull(requestId);
    assertNotNull(UUID.fromString(requestId));
    requestContext.setRequestId("testRequestId");
    assertEquals("testRequestId", requestContext.getRequestId());
}
/**
 * Removes every entry that matches the given predicate.
 *
 * @param filter predicate deciding which entries are removed
 * @return true if any entry was removed
 */
public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
    checkMutability(); // fail fast when this map is read-only
    return _map.entrySet().removeIf(filter);
}
// removeIf: returns false when nothing matches, true when entries match on key or value,
// and leaves only the non-matching entries behind.
@Test
public void testRemoveIf() {
    final DataMap map = new DataMap();
    map.put("key1", 100);
    map.put("key2", 200);
    map.put("key3", 500);
    Assert.assertFalse(map.removeIf(entry -> entry.getKey().equals("Unknown")));
    Assert.assertTrue(map.removeIf(entry -> entry.getKey().equals("key2") || ((Integer) entry.getValue() == 100)));
    Assert.assertEquals(map, Collections.singletonMap("key3", 500));
}
static @Nullable String resolveConsumerArn(Read spec, PipelineOptions options) { String streamName = Preconditions.checkArgumentNotNull(spec.getStreamName()); KinesisIOOptions sourceOptions = options.as(KinesisIOOptions.class); Map<String, String> streamToArnMapping = sourceOptions.getKinesisIOConsumerArns(); String consumerArn; if (streamToArnMapping.containsKey(streamName)) { consumerArn = streamToArnMapping.get(streamName); // can resolve to null too } else { consumerArn = spec.getConsumerArn(); } return consumerArn; }
// With no override in the options, the ARN configured on the read spec wins.
@Test
public void testConsumerArnPassedInIO() {
    KinesisIO.Read readSpec =
        KinesisIO.read().withStreamName("stream-xxx").withConsumerArn("arn::consumer-yyy");
    KinesisIOOptions options = createIOOptions();
    assertThat(KinesisSource.resolveConsumerArn(readSpec, options)).isEqualTo("arn::consumer-yyy");
}
/**
 * Converts the headers of an HTTP/1.x message into HTTP/2 headers, filling in the
 * pseudo-headers (:path, :scheme, :authority, :method for requests; :status for
 * responses) before copying over the remaining regular headers.
 *
 * @param in the HTTP/1.x request or response to convert
 * @param validateHeaders whether the resulting Http2Headers should validate entries
 * @return the populated HTTP/2 headers
 */
public static Http2Headers toHttp2Headers(HttpMessage in, boolean validateHeaders) {
    HttpHeaders inHeaders = in.headers();
    final Http2Headers out = new DefaultHttp2Headers(validateHeaders, inHeaders.size());
    if (in instanceof HttpRequest) {
        HttpRequest request = (HttpRequest) in;
        String host = inHeaders.getAsString(HttpHeaderNames.HOST);
        if (isOriginForm(request.uri()) || isAsteriskForm(request.uri())) {
            // origin-form ("/path") and asterisk-form ("*") targets are used verbatim as :path
            out.path(new AsciiString(request.uri()));
            setHttp2Scheme(inHeaders, out);
        } else {
            // absolute-form target: parse it to derive :path, authority and :scheme
            URI requestTargetUri = URI.create(request.uri());
            out.path(toHttp2Path(requestTargetUri));
            // Take from the request-line if HOST header was empty
            host = isNullOrEmpty(host) ? requestTargetUri.getAuthority() : host;
            setHttp2Scheme(inHeaders, requestTargetUri, out);
        }
        setHttp2Authority(host, out);
        out.method(request.method().asciiName());
    } else if (in instanceof HttpResponse) {
        HttpResponse response = (HttpResponse) in;
        out.status(response.status().codeAsText());
    }

    // Add the HTTP headers which have not been consumed above
    toHttp2Headers(inHeaders, out);
    return out;
}
// Connection-specific HTTP/1.x headers (and HOST, which becomes :authority) must not
// be carried over into the HTTP/2 header set.
@Test
public void connectionSpecificHeadersShouldBeRemoved() {
    HttpHeaders inHeaders = new DefaultHttpHeaders();
    inHeaders.add(CONNECTION, "keep-alive");
    inHeaders.add(HOST, "example.com");
    @SuppressWarnings("deprecation")
    AsciiString keepAlive = KEEP_ALIVE;
    inHeaders.add(keepAlive, "timeout=5, max=1000");
    @SuppressWarnings("deprecation")
    AsciiString proxyConnection = PROXY_CONNECTION;
    inHeaders.add(proxyConnection, "timeout=5, max=1000");
    inHeaders.add(TRANSFER_ENCODING, "chunked");
    inHeaders.add(UPGRADE, "h2c");
    Http2Headers outHeaders = new DefaultHttp2Headers();
    HttpConversionUtil.toHttp2Headers(inHeaders, outHeaders);

    assertFalse(outHeaders.contains(CONNECTION));
    assertFalse(outHeaders.contains(HOST));
    assertFalse(outHeaders.contains(keepAlive));
    assertFalse(outHeaders.contains(proxyConnection));
    assertFalse(outHeaders.contains(TRANSFER_ENCODING));
    assertFalse(outHeaders.contains(UPGRADE));
}
/**
 * Creates an exit-status tracker configured with the given runtime options.
 *
 * @param options runtime options consulted when computing the exit code
 */
public ExitStatus(Options options) {
    this.options = options;
}
// A run containing an UNDEFINED scenario must yield a non-zero (0x1) exit status.
@Test
void with_undefined_scenarios() {
    createRuntime();
    bus.send(testCaseFinishedWithStatus(Status.UNDEFINED));
    assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1)));
}
/**
 * Retrieves and removes the head of this queue.
 *
 * <p>Throwing variant of {@link #poll()}: per the {@code Queue} contract it fails
 * with {@link NoSuchElementException} instead of returning {@code null} when empty.
 *
 * @return the removed head element
 * @throws NoSuchElementException if the queue is empty
 */
@Override
public V remove() {
    final V head = poll();
    if (head != null) {
        return head;
    }
    throw new NoSuchElementException();
}
// Removing a delayed element before its delay elapses prevents it from ever reaching
// the destination queue; the remaining element is delivered after its delay.
@Test
public void testRemove() throws InterruptedException {
    RBlockingQueue<String> blockingFairQueue = redisson.getBlockingQueue("delay_queue");
    RDelayedQueue<String> delayedQueue = redisson.getDelayedQueue(blockingFairQueue);
    delayedQueue.offer("1_1_1", 3, TimeUnit.SECONDS);
    delayedQueue.offer("1_1_2", 7, TimeUnit.SECONDS);

    assertThat(delayedQueue.contains("1_1_1")).isTrue();
    assertThat(delayedQueue.remove("1_1_1")).isTrue();
    assertThat(delayedQueue.contains("1_1_1")).isFalse();

    Thread.sleep(9000);

    assertThat(blockingFairQueue).containsOnly("1_1_2");
}
/**
 * Returns a copy of this search in which every query lacking an explicit stream filter
 * has the supplied default streams added; queries that already declare streams are kept
 * untouched. Returns {@code this} unchanged when no query is missing streams.
 *
 * @param defaultStreamsSupplier lazily supplies the streams the user may access
 * @return this search, or a rebuilt copy with defaulted stream filters
 * @throws MissingStreamPermissionException if defaults are needed but the supplier yields none
 */
public Search addStreamsToQueriesWithoutStreams(Supplier<Set<String>> defaultStreamsSupplier) {
    if (!hasQueriesWithoutStreams()) {
        return this;
    }
    // Partition queries into those with an explicit stream filter and the rest.
    final Set<Query> withStreams = queries().stream().filter(Query::hasStreams).collect(toSet());
    final Set<Query> withoutStreams = Sets.difference(queries(), withStreams);

    final Set<String> defaultStreams = defaultStreamsSupplier.get();

    if (defaultStreams.isEmpty()) {
        throw new MissingStreamPermissionException("User doesn't have access to any streams",
                Collections.emptySet());
    }

    final Set<Query> withDefaultStreams = withoutStreams.stream()
            .map(q -> q.addStreamsToFilter(defaultStreams))
            .collect(toSet());

    final ImmutableSet<Query> newQueries = Sets.union(withStreams, withDefaultStreams).immutableCopy();

    return toBuilder().queries(newQueries).build();
}
// When a query has no streams and the default-streams supplier is empty,
// a MissingStreamPermissionException with no listed streams must be thrown.
@Test
void throwsExceptionIfQueryHasNoStreamsAndThereAreNoDefaultStreams() {
    Search search = searchWithQueriesWithStreams("a,b,c", "");
    assertThatExceptionOfType(MissingStreamPermissionException.class)
            .isThrownBy(() -> search.addStreamsToQueriesWithoutStreams(ImmutableSet::of))
            .satisfies(ex -> assertThat(ex.streamsWithMissingPermissions()).isEmpty());
}
public static Throwable getRootCause(Throwable throwable) { if (throwable == null) { return null; } Throwable rootCause = throwable; // this is to avoid infinite loops for recursive cases final Set<Throwable> seenThrowables = new HashSet<>(); seenThrowables.add(rootCause); while ((rootCause.getCause() != null && !seenThrowables.contains(rootCause.getCause()))) { seenThrowables.add(rootCause.getCause()); rootCause = rootCause.getCause(); } return rootCause; }
// A throwable with no cause is its own root cause (same instance, not a copy).
@Test
void rootCauseIsSelf() {
    Throwable e = new Exception();
    Throwable rootCause = ExceptionUtils.getRootCause(e);
    assertThat(rootCause).isSameAs(e);
}
/**
 * Sanity check that warns when an {@code <if>} element is nested inside an
 * {@code <appender>}, {@code <logger>} or {@code <root>} element, which is unsupported.
 * Emits one warning per offending parent/child pair with line numbers.
 *
 * @param model the parsed configuration model tree; ignored when {@code null}
 */
@Override
public void check(Model model) {
    if (model == null)
        return;

    // Collect all elements that may not legally contain a nested <if>.
    List<Model> secondPhaseModels = new ArrayList<>();
    deepFindAllModelsOfType(AppenderModel.class, secondPhaseModels, model);
    deepFindAllModelsOfType(LoggerModel.class, secondPhaseModels, model);
    deepFindAllModelsOfType(RootLoggerModel.class, secondPhaseModels, model);

    List<Pair<Model, Model>> nestedPairs = deepFindNestedSubModelsOfType(IfModel.class, secondPhaseModels);
    if (nestedPairs.isEmpty())
        return;

    addWarn("<if> elements cannot be nested within an <appender>, <logger> or <root> element");
    addWarn("See also " + NESTED_IF_WARNING_URL);
    for (Pair<Model, Model> pair : nestedPairs) {
        Model p = pair.first;
        int pLine = p.getLineNumber();
        Model s = pair.second;
        int sLine = s.getLineNumber();
        addWarn("Element <"+p.getTag()+"> at line " + pLine + " contains a nested <"+s.getTag()+"> element at line " +sLine);
    }
}
// A model tree with no nested <if> elements must produce no warnings or errors.
@Test
public void smoke() {
    ClassicTopModel topModel = new ClassicTopModel();
    inwspeChecker.check(topModel);
    statusChecker.assertIsWarningOrErrorFree();
}
/**
 * Creates a new secret with automatic replication under the configured project and adds
 * an initial version holding the given payload. The secret id is recorded for later
 * cleanup by this resource manager.
 *
 * @param secretId non-empty id for the new secret
 * @param secretData non-empty payload stored as the first secret version
 * @throws SecretManagerResourceManagerException wrapping any failure from the client
 */
public void createSecret(String secretId, String secretData) {
    checkArgument(!secretId.isEmpty(), "secretId can not be empty");
    checkArgument(!secretData.isEmpty(), "secretData can not be empty");
    try {
        checkIsUsable();
        ProjectName projectName = ProjectName.of(projectId);

        // Create the parent secret.
        Secret secret =
            Secret.newBuilder()
                .setReplication(
                    Replication.newBuilder()
                        .setAutomatic(Replication.Automatic.newBuilder().build())
                        .build())
                .build();

        Secret createdSecret = secretManagerServiceClient.createSecret(projectName, secretId, secret);

        // Add a secret version.
        SecretPayload payload =
            SecretPayload.newBuilder().setData(ByteString.copyFromUtf8(secretData)).build();
        secretManagerServiceClient.addSecretVersion(createdSecret.getName(), payload);
        createdSecretIds.add(secretId);
        LOG.info("Created secret successfully.");
    } catch (Exception e) {
        throw new SecretManagerResourceManagerException("Error while creating secret", e);
    }
}
// createSecret must call the client with the configured project and the requested id.
@Test
public void testCreateSecretShouldCreate() {
    Secret secret = Secret.getDefaultInstance();
    when(secretManagerServiceClient.createSecret(
            any(ProjectName.class), any(String.class), any(Secret.class)))
        .thenReturn(secret);

    testManager.createSecret(SECRET_ID, SECRET_DATA);

    verify(secretManagerServiceClient)
        .createSecret(
            projectNameCaptor.capture(), secretNameStringCaptor.capture(), secretCaptor.capture());
    ProjectName actualProjectName = projectNameCaptor.getValue();
    assertThat(actualProjectName.getProject()).isEqualTo(PROJECT_ID);
    assertThat(secretNameStringCaptor.getValue()).matches(SECRET_ID);
}
/**
 * Decides whether a container in the given status may be removed.
 *
 * <p>Removal is disallowed for containers that do not exist, whose status is unknown,
 * that are already being removed, or that are still running; every other status
 * (stopped, exited, created, dead, restarting, ...) is removable.
 *
 * @param containerStatus the container's current status
 * @return {@code true} if a container in this status can be removed
 */
public static boolean isRemovable(DockerContainerStatus containerStatus) {
    switch (containerStatus) {
        case NONEXISTENT:
        case UNKNOWN:
        case REMOVING:
        case RUNNING:
            return false;
        default:
            return true;
    }
}
// Exhaustively checks which container statuses permit removal.
@Test
public void testIsRemovable() {
    assertTrue(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.STOPPED));
    assertTrue(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.RESTARTING));
    assertTrue(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.EXITED));
    assertTrue(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.CREATED));
    assertTrue(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.DEAD));
    assertFalse(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.NONEXISTENT));
    assertFalse(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.REMOVING));
    assertFalse(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.UNKNOWN));
    assertFalse(DockerCommandExecutor.isRemovable(
        DockerContainerStatus.RUNNING));
}
@Override public void set(String name, String value) { checkKey(name); String[] keyParts = splitKey(name); String ns = registry.getNamespaceURI(keyParts[0]); if (ns != null) { try { xmpData.setProperty(ns, keyParts[1], value); } catch (XMPException e) { // Ignore } } }
// Writing multiple values to a simple (non-array) property must be rejected.
@Test
public void set_simplePropWithMultipleValues_throw() {
    assertThrows(PropertyTypeException.class, () -> {
        xmpMeta.set(TikaCoreProperties.FORMAT, new String[]{"value1", "value2"});
    });
}
/**
 * Commits updated table metadata: stamps bookkeeping properties (timestamps, version,
 * location), applies any snapshot additions/deletions serialized into the properties,
 * writes the new metadata file, and persists the corresponding house-table row (or,
 * for staged creates, only refreshes in-memory metadata). On unexpected persistence
 * failures the commit status is re-checked before deciding between a failed and an
 * unknown commit outcome, and failure/unknown counters are always reported.
 *
 * @param base the previous metadata, or {@code null} on initial table creation
 * @param metadata the new metadata to commit
 * @throws BadRequestException for invalid Iceberg snapshot updates
 * @throws CommitFailedException when the commit is known to have failed
 * @throws CommitStateUnknownException when the commit outcome cannot be determined
 */
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
    int version = currentVersion() + 1;
    CommitStatus commitStatus = CommitStatus.FAILURE;
    /* This method adds no fs scheme, and it persists in HTS that way. */
    final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
    HouseTable houseTable = HouseTable.builder().build();
    try {
        // Now that we have metadataLocation we stamp it in metadata property.
        Map<String, String> properties = new HashMap<>(metadata.properties());
        failIfRetryUpdate(properties);

        String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
        properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
        if (base == null) {
            // first-ever commit for this table: record creation time as well
            properties.put(getCanonicalFieldName("creationTime"), currentTsString);
        }
        // tableVersion records the PREVIOUS location (or the initial-version sentinel)
        properties.put(
            getCanonicalFieldName("tableVersion"),
            properties.getOrDefault(
                getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
        properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);

        // Snapshot payloads and the stage-create flag travel as properties; strip them out.
        String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
        String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
        boolean isStageCreate =
            Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
        logPropertiesMap(properties);

        TableMetadata updatedMetadata = metadata.replaceProperties(properties);

        if (serializedSnapshotsToPut != null) {
            List<Snapshot> snapshotsToPut =
                SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
            // Split requested-vs-current into appended and deleted snapshot sets.
            Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
                SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
            List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
            List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
            snapshotInspector.validateSnapshotsUpdate(
                updatedMetadata, appendedSnapshots, deletedSnapshots);
            Map<String, SnapshotRef> snapshotRefs =
                serializedSnapshotRefs == null
                    ? new HashMap<>()
                    : SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
            updatedMetadata = maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
            updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
        }

        final TableMetadata updatedMtDataRef = updatedMetadata;
        metricsReporter.executeWithStats(
            () -> TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
            InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);

        houseTable = houseTableMapper.toHouseTable(updatedMetadata);
        if (!isStageCreate) {
            houseTableRepository.save(houseTable);
        } else {
            /**
             * Refresh current metadata for staged tables from newly created metadata file and disable
             * "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
             * TableMetadata)}
             */
            refreshFromMetadataLocation(newMetadataLocation);
        }
        commitStatus = CommitStatus.SUCCESS;
    } catch (InvalidIcebergSnapshotException e) {
        throw new BadRequestException(e, e.getMessage());
    } catch (CommitFailedException e) {
        throw e;
    } catch (HouseTableCallerException
        | HouseTableNotFoundException
        | HouseTableConcurrentUpdateException e) {
        throw new CommitFailedException(e);
    } catch (Throwable persistFailure) {
        // Try to reconnect and determine the commit status for unknown exception
        log.error(
            "Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
            persistFailure);
        commitStatus = checkCommitStatus(newMetadataLocation, metadata);
        switch (commitStatus) {
            case SUCCESS:
                log.debug("Calling doCommit succeeded");
                break;
            case FAILURE:
                // logging error and exception-throwing co-existence is needed, given the exception
                // handler in
                // org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create swallow
                // the
                // nested exception information.
                log.error("Exception details:", persistFailure);
                throw new CommitFailedException(
                    persistFailure,
                    String.format(
                        "Persisting metadata file %s at version %s for table %s failed while persisting to house table",
                        newMetadataLocation, version, GSON.toJson(houseTable)));
            case UNKNOWN:
                throw new CommitStateUnknownException(persistFailure);
        }
    } finally {
        switch (commitStatus) {
            case FAILURE:
                metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
                break;
            case UNKNOWN:
                metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
                break;
            default:
                break; /*should never happen, kept to silence SpotBugs*/
        }
    }
}
// When the base metadata already contains the first snapshot, a commit carrying all
// snapshots must append only the new ones and record the previous location as version.
@Test
void testDoCommitAppendSnapshotsExistingVersion() throws IOException {
    List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();

    // add 1 snapshot to the base metadata
    TableMetadata base =
        TableMetadata.buildFrom(BASE_TABLE_METADATA)
            .setBranchSnapshot(testSnapshots.get(0), SnapshotRef.MAIN_BRANCH)
            .build();

    Map<String, String> properties = new HashMap<>(base.properties());
    try (MockedStatic<TableMetadataParser> ignoreWriteMock =
        Mockito.mockStatic(TableMetadataParser.class)) {
        // add all snapshots to new metadata
        properties.put(
            CatalogConstants.SNAPSHOTS_JSON_KEY, SnapshotsUtil.serializedSnapshots(testSnapshots));
        properties.put(
            CatalogConstants.SNAPSHOTS_REFS_KEY,
            SnapshotsUtil.serializeMap(
                IcebergTestUtil.obtainSnapshotRefsFromSnapshot(
                    testSnapshots.get(testSnapshots.size() - 1))));
        properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
        TableMetadata metadata = base.replaceProperties(properties);
        openHouseInternalTableOperations.doCommit(base, metadata);

        Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
        Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
        Assertions.assertEquals(
            4, updatedProperties.size()); /*location, lastModifiedTime, version and deleted_snapshots*/
        Assertions.assertEquals(
            TEST_LOCATION, updatedProperties.get(getCanonicalFieldName("tableVersion")));
        // verify only 3 snapshots are added
        Assertions.assertEquals(
            testSnapshots.subList(1, 4).stream()
                .map(s -> Long.toString(s.snapshotId()))
                .collect(Collectors.joining(",")),
            updatedProperties.get(getCanonicalFieldName("appended_snapshots")));
        Assertions.assertTrue(updatedProperties.containsKey(getCanonicalFieldName("tableLocation")));
        Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
    }
}
/**
 * Applies a custom field-type mapping to each of the given index sets, optionally
 * rotating (cycling) each updated index set immediately so the new mapping takes effect.
 * Pre-checks validate the field, the target type, and that all indices support the change;
 * a failure on one index set is logged and rethrown, aborting the remaining ones.
 *
 * @param customMapping the field name and new type to apply
 * @param indexSetsIds ids of the index sets to update
 * @param rotateImmediately whether to cycle each updated index set right away
 */
public void changeFieldType(final CustomFieldMapping customMapping,
                            final Set<String> indexSetsIds,
                            final boolean rotateImmediately) {
    checkFieldTypeCanBeChanged(customMapping.fieldName());
    checkType(customMapping);
    checkAllIndicesSupportFieldTypeChange(customMapping.fieldName(), indexSetsIds);
    for (String indexSetId : indexSetsIds) {
        try {
            indexSetService.get(indexSetId).ifPresent(indexSetConfig -> {
                var updatedIndexSetConfig = storeMapping(customMapping, indexSetConfig);
                if (rotateImmediately) {
                    updatedIndexSetConfig.ifPresent(this::cycleIndexSet);
                }
            });
        } catch (Exception ex) {
            LOG.error("Failed to update field type in index set : " + indexSetId, ex);
            throw ex;
        }
    }
}
// An index set that does not support field-type changes must trigger a BadRequestException
// before any mapping is stored.
@Test
void testThrowsExceptionOnIndexThatCannotHaveFieldTypeChanged() {
    IndexSetConfig illegalForFieldTypeChange = mock(IndexSetConfig.class);
    doReturn(Optional.of(illegalForFieldTypeChange)).when(indexSetService).get("existing_index_set");
    assertThrows(BadRequestException.class,
            () -> toTest.changeFieldType(newCustomMapping, Set.of("existing_index_set"), false));
    verifyNoInteractions(mongoIndexSetService);
}
/**
 * Writes the bytes {@code buffer[off .. off+len)} to the output, replacing the five
 * HTML-significant characters (&amp;, &lt;, &gt;, ' and ") with their entity byte
 * sequences; all other bytes are copied through unchanged.
 *
 * @param output destination stream
 * @param buffer source bytes
 * @param off start offset within {@code buffer}
 * @param len number of bytes to process
 * @throws IOException if writing to the output fails
 */
public static void quoteHtmlChars(OutputStream output, byte[] buffer,
                                  int off, int len) throws IOException {
    for (int i = off; i < off + len; i++) {
        final byte b = buffer[i];
        if (b == '&') {
            output.write(AMP_BYTES);
        } else if (b == '<') {
            output.write(LT_BYTES);
        } else if (b == '>') {
            output.write(GT_BYTES);
        } else if (b == '\'') {
            output.write(APOS_BYTES);
        } else if (b == '"') {
            output.write(QUOT_BYTES);
        } else {
            output.write(buffer, i, 1);
        }
    }
}
// Each HTML-significant character is replaced by its entity; null passes through as null.
@Test
public void testQuoting() throws Exception {
    assertEquals("ab&lt;cd", HtmlQuoting.quoteHtmlChars("ab<cd"));
    assertEquals("ab&gt;", HtmlQuoting.quoteHtmlChars("ab>"));
    assertEquals("&amp;&amp;&amp;", HtmlQuoting.quoteHtmlChars("&&&"));
    assertEquals(" &apos;\n", HtmlQuoting.quoteHtmlChars(" '\n"));
    assertEquals("&quot;", HtmlQuoting.quoteHtmlChars("\""));
    assertEquals(null, HtmlQuoting.quoteHtmlChars(null));
}
/**
 * Returns this queue path followed by progressively more wildcarded variants, replacing
 * the trailing path components with the wildcard queue marker one level at a time, up to
 * the level supported for the given maximum auto-created queue depth. The first entry is
 * always the fully explicit path.
 *
 * NOTE(review): this mutates the array returned by getPathComponents() in place —
 * assumed to be a defensive copy; confirm getPathComponents() does not expose internal state.
 *
 * @param maxAutoCreatedQueueDepth maximum depth for auto-created queues
 * @return paths ordered from most explicit to most wildcarded
 */
public List<QueuePath> getWildcardedQueuePaths(int maxAutoCreatedQueueDepth) {
    List<QueuePath> wildcardedPaths = new ArrayList<>();
    // Start with the most explicit format (without wildcard)
    wildcardedPaths.add(this);

    String[] pathComponents = getPathComponents();
    int supportedWildcardLevel = getSupportedWildcardLevel(maxAutoCreatedQueueDepth);

    // Collect all template entries
    for (int wildcardLevel = 1; wildcardLevel <= supportedWildcardLevel; wildcardLevel++) {
        int wildcardedComponentIndex = pathComponents.length - wildcardLevel;
        pathComponents[wildcardedComponentIndex] = WILDCARD_QUEUE;
        QueuePath wildcardedPath = createFromQueues(pathComponents);
        wildcardedPaths.add(wildcardedPath);
    }

    return wildcardedPaths;
}
// With depth 1, the result is exactly the explicit path plus its one-level-wildcarded form.
@Test
public void testWildcardedQueuePathsWithOneLevelWildCard() {
    int maxAutoCreatedQueueDepth = 1;
    List<QueuePath> expectedPaths = new ArrayList<>();
    expectedPaths.add(TEST_QUEUE_PATH);
    expectedPaths.add(ONE_LEVEL_WILDCARDED_TEST_PATH);

    List<QueuePath> wildcardedPaths = TEST_QUEUE_PATH
        .getWildcardedQueuePaths(maxAutoCreatedQueueDepth);

    Assert.assertEquals(expectedPaths, wildcardedPaths);
}
/**
 * Installs a {@link FlinkSecurityManager} derived from the configuration as the JVM's
 * security manager, when the configuration asks for one. The static reference is updated
 * regardless, so a {@code null} (feature disabled) clears any previously tracked manager.
 *
 * @param configuration cluster configuration driving the security-manager features
 * @throws IllegalConfigurationException if an existing security manager forbids replacement
 */
public static void setFromConfiguration(Configuration configuration) {
    final FlinkSecurityManager flinkSecurityManager =
        FlinkSecurityManager.fromConfiguration(configuration);
    if (flinkSecurityManager != null) {
        try {
            System.setSecurityManager(flinkSecurityManager);
        } catch (Exception e) {
            // surface a configuration error with remediation hints instead of a raw SecurityException
            throw new IllegalConfigurationException(
                String.format(
                    "Could not register security manager due to no permission to "
                        + "set a SecurityManager. Either update your existing "
                        + "SecurityManager to allow the permission or do not use "
                        + "security manager features (e.g., '%s: %s', '%s: %s')",
                    ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT.key(),
                    ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT.defaultValue(),
                    ClusterOptions.HALT_ON_FATAL_ERROR.key(),
                    ClusterOptions.HALT_ON_FATAL_ERROR.defaultValue()),
                e);
        }
    }
    FlinkSecurityManager.flinkSecurityManager = flinkSecurityManager;
}
// If the pre-installed security manager denies setSecurityManager once, registration
// must fail with IllegalConfigurationException (the stub then re-allows cleanup).
@Test
void testRegistrationNotAllowedByExistingSecurityManager() {
    Configuration configuration = new Configuration();
    configuration.set(
        ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT, ClusterOptions.UserSystemExitMode.THROW);

    System.setSecurityManager(
        new SecurityManager() {

            private boolean fired;

            @Override
            public void checkPermission(Permission perm) {
                if (!fired && perm.getName().equals("setSecurityManager")) {
                    try {
                        throw new SecurityException("not allowed");
                    } finally {
                        // Allow removing this manager again
                        fired = true;
                    }
                }
            }
        });

    assertThatThrownBy(() -> FlinkSecurityManager.setFromConfiguration(configuration))
        .isInstanceOf(IllegalConfigurationException.class)
        .hasMessageContaining("Could not register security manager");
}
/**
 * Blocking variant of {@link #remainTimeToLiveAsync(Object)}: delegates to the async
 * call and waits for its result.
 *
 * @param key the map key to query
 * @return the remaining time to live reported by the async operation
 */
@Override
public long remainTimeToLive(K key) {
    return get(remainTimeToLiveAsync(key));
}
// TTL semantics: positive for expiring entries, -1 without TTL, -2 for missing keys;
// max-idle caps the observable remaining TTL.
@Test
public void testRemainTimeToLive() {
    RMapCache<String, String> map = redisson.getMapCache("test");
    map.put("1", "2", 2, TimeUnit.SECONDS);
    assertThat(map.remainTimeToLive("1")).isBetween(1900L, 2000L);
    map.put("3", "4");
    assertThat(map.remainTimeToLive("3")).isEqualTo(-1);
    assertThat(map.remainTimeToLive("0")).isEqualTo(-2);

    map.put("5", "6", 20, TimeUnit.SECONDS, 10, TimeUnit.SECONDS);
    assertThat(map.remainTimeToLive("1")).isLessThan(9900);
    map.destroy();
}
/**
 * Converts an arbitrary value to its string representation.
 *
 * <p>A {@code null} argument yields a successful result carrying {@code null} rather
 * than an error; all other values are formatted via {@code TypeUtil.formatValue}.
 *
 * @param val the value to convert, may be {@code null}
 * @return a successful result wrapping the formatted string (or {@code null})
 */
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
    return val == null
            ? FEELFnResult.ofResult(null)
            : FEELFnResult.ofResult(TypeUtil.formatValue(val, false));
}
// string(null) must succeed with a null result, not raise an error.
@Test
void invokeNull() {
    FunctionTestUtil.assertResult(stringFunction.invoke(null), null);
}
/**
 * Runs an interactive window-range query for the given key against the backing state
 * store, keeping only windows whose start and end instants fall within the supplied
 * bounds. Each surviving window becomes a {@link WindowedRow} whose row time is the
 * window end. The store's {@link Position} is propagated for consistency tokens, and an
 * optional position bound constrains which store state may serve the query.
 *
 * @param key the key to look up
 * @param partition the partition expected to hold the key
 * @param windowStart acceptable range for window start times
 * @param windowEnd acceptable range for window end times
 * @param position optional minimum position the serving store must have reached
 * @return matching rows plus the store position
 * @throws MaterializationException on any query failure
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStart,
    final Range<Instant> windowEnd,
    final Optional<Position> position
) {
    try {
        final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);

        StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
            inStore(stateStore.getStateStoreName()).withQuery(query);
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }

        final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
            stateStore.getKafkaStreams().query(request);

        final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
            result.getPartitionResults().get(partition);

        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        }

        try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it = queryResult.getResult()) {
            final Builder<WindowedRow> builder = ImmutableList.builder();

            while (it.hasNext()) {
                final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
                final Window wnd = next.key.window();

                // filter out windows whose boundaries fall outside the requested ranges
                if (!windowStart.contains(wnd.startTime())) {
                    continue;
                }
                if (!windowEnd.contains(wnd.endTime())) {
                    continue;
                }

                final long rowTime = wnd.end();

                final WindowedRow row = WindowedRow.of(
                    stateStore.schema(),
                    next.key,
                    next.value,
                    rowTime
                );

                builder.add(row);
            }

            return KsMaterializedQueryResult.rowIteratorWithPosition(
                builder.build().iterator(), queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
// A session whose end equals the CLOSED upper bound must be included in the result,
// and the store position must be propagated.
@Test
public void shouldReturnValueIfSessionEndsAtUpperBoundIfUpperBoundClosed() {
    // Given:
    final Range<Instant> endBounds = Range.closed(
        LOWER_INSTANT,
        UPPER_INSTANT
    );

    final Instant wstart = UPPER_INSTANT.minusMillis(1);
    givenSingleSession(wstart, UPPER_INSTANT);

    // When:
    final KsMaterializedQueryResult<WindowedRow> result =
        table.get(A_KEY, PARTITION, Range.all(), endBounds);

    // Then:
    final Iterator<WindowedRow> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(
        WindowedRow.of(
            SCHEMA,
            sessionKey(wstart, UPPER_INSTANT),
            A_VALUE,
            UPPER_INSTANT.toEpochMilli()
        )
    ));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
/**
 * Returns a comparator for {@link StructLike} rows of the given struct type.
 *
 * @param struct the struct schema the compared rows conform to
 * @return a comparator backed by {@code StructLikeComparator}
 */
public static Comparator<StructLike> forType(Types.StructType struct) {
    return new StructLikeComparator(struct);
}
// Sanity check: the double comparator orders 0.1 before 0.2.
@Test
public void testDouble() {
    assertComparesCorrectly(Comparators.forType(Types.DoubleType.get()), 0.1d, 0.2d);
}
/**
 * Determines whether the given file exists by searching its parent directory listing.
 * The root path always exists; a missing parent directory is treated as "not found"
 * rather than an error.
 *
 * @param file the path to look for
 * @param listener receives listing progress while searching
 * @return true if the file was found (or is the root)
 * @throws BackgroundException on listing failures other than a missing parent
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if (file.isRoot()) {
        return true;
    }
    try {
        final Path found = this.search(file, listener);
        return found != null;
    } catch (NotfoundException e) {
        if (log.isDebugEnabled()) {
            log.debug(String.format("Parent directory for file %s not found", file));
        }
        return false;
    }
}
// Lookup must match on path type as well as name: a directory query must not match
// a file entry with the same name.
@Test
public void testFindByType() throws Exception {
    final DefaultFindFeature feature = new DefaultFindFeature(new NullSession(new Host(new TestProtocol())) {
        @Override
        public AttributedList<Path> list(Path file, ListProgressListener listener) {
            return new AttributedList<>(Collections.singletonList(new Path("/a", EnumSet.of(Path.Type.file))));
        }
    });
    assertFalse(feature.find(new Path("/a", EnumSet.of(Path.Type.directory))));
    assertTrue(feature.find(new Path("/a", EnumSet.of(Path.Type.file))));
}
public Map<String, Object> getConfigurationByPluginTypeOrAliases(final String pluginType, final Class<? extends Plugin> plugin) { Map<String, Object> configuration = getConfigurationByPluginType(pluginType); if (configuration.isEmpty()) { // let's check if the plugin-configuration was provided using type alias. Set<String> aliases = Plugin.getAliases(plugin); for (String alias: aliases) { configuration = getConfigurationByPluginType(alias); if (!configuration.isEmpty()) { break; // non-empty configuration was found for a plugin alias. } } } return configuration; }
// A configuration registered under a plugin's alias must be found when looked up
// by the plugin's canonical type.
@Test
void shouldGetConfigurationForAlias() {
    // Given
    Map<String, Object> config = Map.of(
        "prop1", "v1",
        "prop2", "v1",
        "prop3", "v1"
    );
    PluginConfigurations configurations = new PluginConfigurations(List.of(
        new PluginConfiguration(0, "io.kestra.core.runners.test.task.Alias", config)
    ));
    Map<String, Object> result = configurations.getConfigurationByPluginTypeOrAliases(new TaskWithAlias().getType(), TaskWithAlias.class);
    Assertions.assertEquals(config, result);
}
/**
 * Returns (creating if absent) the store-level sensor identified by the current thread,
 * task id, store name and suffix. The thread name in the key confines each key to its
 * owning thread, so per-key values need no extra synchronization.
 *
 * @param taskId the task owning the store
 * @param storeName the state store's name
 * @param sensorSuffix distinguishing suffix for the sensor within the store scope
 * @param recordingLevel recording level applied when the sensor is created
 * @param parents optional parent sensors the new sensor reports into
 * @return the existing or newly created sensor
 */
public final Sensor storeLevelSensor(final String taskId,
                                     final String storeName,
                                     final String sensorSuffix,
                                     final RecordingLevel recordingLevel,
                                     final Sensor... parents) {
    final String sensorPrefix = storeSensorPrefix(Thread.currentThread().getName(), taskId, storeName);
    // since the keys in the map storeLevelSensors contain the name of the current thread and threads only
    // access keys in which their name is contained, the value in the maps do not need to be thread safe
    // and we can use a LinkedList here.
    // TODO: In future, we could use thread local maps since each thread will exclusively access the set of keys
    //  that contain its name. Similar is true for the other metric levels. Thread-level metrics need some
    //  special attention, since they are created before the thread is constructed. The creation of those
    //  metrics could be moved into the run() method of the thread.
    return getSensors(storeLevelSensors, sensorSuffix, sensorPrefix, recordingLevel, parents);
}
// Sensors for the same store but different task ids must use distinct sensor keys.
@Test
public void shouldNotUseSameStoreLevelSensorKeyWithDifferentTaskIds() {
    final Metrics metrics = mock(Metrics.class);
    final ArgumentCaptor<String> sensorKeys = setUpSensorKeyTests(metrics);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);

    streamsMetrics.storeLevelSensor(TASK_ID1, STORE_NAME1, SENSOR_NAME_1, INFO_RECORDING_LEVEL);
    streamsMetrics.storeLevelSensor(TASK_ID2, STORE_NAME1, SENSOR_NAME_1, INFO_RECORDING_LEVEL);

    assertThat(sensorKeys.getAllValues().get(0), not(sensorKeys.getAllValues().get(1)));
}
/**
 * Convenience overload that resolves (or creates) the named config using the default
 * name setter for applying the requested name to a newly created config.
 *
 * @param configPatternMatcher matcher used to resolve wildcard config names
 * @param configs the existing configs keyed by name
 * @param name the requested config name
 * @param clazz concrete config class used when a new instance must be created
 * @return the resolved or newly created config
 */
public static <T extends NamedConfig> T getConfig(ConfigPatternMatcher configPatternMatcher,
                                                  Map<String, T> configs, String name, Class clazz) {
    return getConfig(configPatternMatcher, configs, name, clazz, (BiConsumer<T, String>) DEFAULT_NAME_SETTER);
}
@Test
public void getNonExistingConfig_createNewWithCloningDefault() {
    // A "default" config acts as the template for configs requested under unknown names.
    QueueConfig defaultConfig = new QueueConfig("default");
    defaultConfig.setBackupCount(5);
    queueConfigs.put(defaultConfig.getName(), defaultConfig);

    QueueConfig created = ConfigUtils.getConfig(configPatternMatcher, queueConfigs, "newConfig", QueueConfig.class);

    // The created config clones the default's settings but carries the requested name,
    // and is registered in the map alongside the default.
    assertEquals("newConfig", created.getName());
    assertEquals(5, created.getBackupCount());
    assertEquals(2, queueConfigs.size());
    assertTrue(queueConfigs.containsKey("newConfig"));
    assertTrue(queueConfigs.containsKey("default"));
}
/**
 * Whether this select contains a table subquery: either a WITH segment is present,
 * or the FROM clause is itself a subquery table.
 */
public boolean containsTableSubquery() {
    if (getSqlStatement().getWithSegment().isPresent()) {
        return true;
    }
    return getSqlStatement().getFrom()
            .filter(SubqueryTableSegment.class::isInstance)
            .isPresent();
}
@Test
void assertContainsEnhancedTable() {
    // Builds a SELECT whose FROM clause is itself a subquery table, then checks
    // that the statement context reports a table subquery.
    SelectStatement selectStatement = new MySQLSelectStatement();
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    selectStatement.setFrom(new SubqueryTableSegment(0, 0, new SubquerySegment(0, 0, createSubSelectStatement(), "")));
    ShardingSphereMetaData metaData = new ShardingSphereMetaData(Collections.singletonMap(DefaultDatabase.LOGIC_NAME, mockDatabase()),
        mock(ResourceMetaData.class), mock(RuleMetaData.class), mock(ConfigurationProperties.class));
    SelectStatementContext actual = new SelectStatementContext(metaData, Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    assertTrue(actual.containsTableSubquery());
}
/** Clears the backing cache, removing every stored entry (exposed as a JMX operation). */
@Override
@ManagedOperation(description = "Clear the store")
public void clear() {
    cache.clear();
}
@Test
void testClear() {
    // NOTE: despite the name, this exercises remove(): a confirmed key is removed,
    // and removing an absent key is a no-op.
    // add key to remove
    assertTrue(repo.add(key01));
    assertTrue(repo.confirm(key01));
    // remove key
    assertTrue(repo.remove(key01));
    assertFalse(repo.confirm(key01));
    // try to remove a key that isn't there
    repo.remove(key02);
}
/**
 * Records one enumeration's split count into the circular history buffer.
 * Once {@code count} exceeds the buffer length, older entries are overwritten.
 * Synchronized — presumably guarded against concurrent readers of the history; confirm at call sites.
 */
synchronized void add(int splitCount) {
    // Ring-buffer slot for the next entry.
    int pos = count % history.length;
    history[pos] = splitCount;
    count += 1;
}
@Test
public void testNotFullHistory() {
    // With fewer entries than the buffer capacity (2 of 3), the snapshot
    // contains only the recorded values in insertion order.
    EnumerationHistory history = new EnumerationHistory(3);
    history.add(1);
    history.add(2);
    int[] expectedHistorySnapshot = {1, 2};
    testHistory(history, expectedHistorySnapshot);
}
/**
 * Looks up a network by id from the store; returns the store's result
 * (null when absent — see store contract).
 *
 * @throws IllegalArgumentException when the id is null or empty
 */
@Override
public KubevirtNetwork network(String networkId) {
    checkArgument(!Strings.isNullOrEmpty(networkId), ERR_NULL_NETWORK_ID);
    return networkStore.network(networkId);
}
@Test
public void testGetNetworkById() {
    // A known id resolves to a network; an unknown id yields null.
    createBasicNetworks();
    assertNotNull("Network did not match", target.network(NETWORK_ID));
    assertNull("Network did not match", target.network(UNKNOWN_ID));
}
/**
 * Asserts that the subject iterable contains no duplicate elements.
 * Duplicates are detected via a multiset count (equality-based), and the failure
 * message reports each duplicated entry with its count plus the full contents.
 */
public final void containsNoDuplicates() {
    List<Multiset.Entry<?>> duplicates = newArrayList();
    // LinkedHashMultiset preserves encounter order so the report lists duplicates
    // in the order they first appear.
    for (Multiset.Entry<?> entry : LinkedHashMultiset.create(checkNotNull(actual)).entrySet()) {
        if (entry.getCount() > 1) {
            duplicates.add(entry);
        }
    }
    if (!duplicates.isEmpty()) {
        failWithoutActual(
            simpleFact("expected not to contain duplicates"),
            fact("but contained", duplicates),
            fullContents());
    }
}
@Test
public void doesNotContainDuplicatesMixedTypes() {
    // 2 (Integer) and 2L (Long) are not equal, so the list has no duplicates.
    assertThat(asList(1, 2, 2L, 3)).containsNoDuplicates();
}
/**
 * Synchronously consumes a single message (admin "consume message directly" command)
 * and reports the mapped result, elapsed time, and any listener exception.
 */
@Override
public ConsumeMessageDirectlyResult consumeMessageDirectly(MessageExt msg, String brokerName) {
    ConsumeMessageDirectlyResult result = new ConsumeMessageDirectlyResult();
    result.setOrder(false);
    result.setAutoCommit(true);

    List<MessageExt> msgs = new ArrayList<>();
    msgs.add(msg);
    MessageQueue mq = new MessageQueue();
    mq.setBrokerName(brokerName);
    mq.setTopic(msg.getTopic());
    mq.setQueueId(msg.getQueueId());

    ConsumeConcurrentlyContext context = new ConsumeConcurrentlyContext(mq);

    // Normalize retry topic / namespace before handing the message to the listener.
    this.defaultMQPushConsumerImpl.resetRetryAndNamespace(msgs, this.consumerGroup);

    final long beginTime = System.currentTimeMillis();

    log.info("consumeMessageDirectly receive new message: {}", msg);

    try {
        ConsumeConcurrentlyStatus status = this.messageListener.consumeMessage(msgs, context);
        if (status != null) {
            // Map listener status to the admin-facing result code.
            switch (status) {
                case CONSUME_SUCCESS:
                    result.setConsumeResult(CMResult.CR_SUCCESS);
                    break;
                case RECONSUME_LATER:
                    result.setConsumeResult(CMResult.CR_LATER);
                    break;
                default:
                    break;
            }
        } else {
            // A null return from the listener is reported distinctly, not as a failure.
            result.setConsumeResult(CMResult.CR_RETURN_NULL);
        }
    } catch (Throwable e) {
        // Any listener throwable is captured into the result instead of propagating.
        result.setConsumeResult(CMResult.CR_THROW_EXCEPTION);
        result.setRemark(UtilAll.exceptionSimpleDesc(e));

        log.warn("consumeMessageDirectly exception: {} Group: {} Msgs: {} MQ: {}",
            UtilAll.exceptionSimpleDesc(e),
            ConsumeMessagePopConcurrentlyService.this.consumerGroup,
            msgs,
            mq, e);
    }

    result.setSpentTimeMills(System.currentTimeMillis() - beginTime);

    log.info("consumeMessageDirectly Result: {}", result);

    return result;
}
@Test
public void testConsumeMessageDirectlyWithCrLater() {
    // A RECONSUME_LATER listener status must map to the CR_LATER admin result.
    when(messageListener.consumeMessage(any(), any(ConsumeConcurrentlyContext.class))).thenReturn(ConsumeConcurrentlyStatus.RECONSUME_LATER);
    ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
    assertEquals(CMResult.CR_LATER, actual.getConsumeResult());
}
/**
 * Returns true when the quantity's trimmed name matches any configured wildcard
 * pattern whose quantity threshold also holds; blank names never match.
 */
@Override
public Boolean load(@Nonnull final NamedQuantity key) {
    final String rawName = key.getName();
    if (Strings.isNullOrEmpty(rawName)) {
        return false;
    }
    final String trimmedName = rawName.trim();
    for (final ItemThreshold threshold : itemThresholds) {
        final boolean nameMatches = WildcardMatcher.matches(threshold.getItemName(), trimmedName);
        if (nameMatches && threshold.quantityHolds(key.getQuantity())) {
            return true;
        }
    }
    return false;
}
@Test
public void testLoadQuantities() {
    // Patterns combine a wildcard on the name with a quantity threshold;
    // a match requires both the name pattern and the threshold to hold.
    WildcardMatchLoader thresholdLoader =
        new WildcardMatchLoader(Arrays.asList("rune* < 3", "*whip>3", "nature*<5", "*rune > 30"));

    assertTrue(thresholdLoader.load(new NamedQuantity("Nature Rune", 50)));
    assertFalse(thresholdLoader.load(new NamedQuantity("Nature Impling", 5)));
    assertTrue(thresholdLoader.load(new NamedQuantity("Abyssal whip", 4)));
    assertFalse(thresholdLoader.load(new NamedQuantity("Abyssal dagger", 1)));
    assertTrue(thresholdLoader.load(new NamedQuantity("Rune Longsword", 2)));
}
/**
 * Returns the committable manager registered under the end-of-input (EOI)
 * checkpoint id, or null when none exists.
 */
@Nullable
public CommittableManager<CommT> getEndOfInputCommittable() {
    return checkpointCommittables.get(EOI);
}
@Test
void testGetEndOfInputCommittable() {
    // A summary added with a null checkpoint id is treated as end-of-input and
    // registered under the EOI id (Long.MAX_VALUE).
    final CommittableCollector<Integer> committableCollector =
        new CommittableCollector<>(1, 1, METRIC_GROUP);
    CommittableSummary<Integer> first = new CommittableSummary<>(1, 1, null, 1, 0, 0);
    committableCollector.addMessage(first);

    CommittableManager<Integer> endOfInputCommittable = committableCollector.getEndOfInputCommittable();
    assertThat(endOfInputCommittable).isNotNull();
    SinkV2Assertions.assertThat(endOfInputCommittable.getSummary())
        .hasCheckpointId(Long.MAX_VALUE);
}
/**
 * Not supported by this adapter; always throws {@link MethodNotAvailableException}.
 */
@Override
@MethodNotAvailable
public CompletionStage<Void> setAsync(K key, V value) {
    throw new MethodNotAvailableException();
}
// setAsync is unavailable on this adapter and must throw.
@Test(expected = MethodNotAvailableException.class)
public void testSetAsync() {
    adapter.setAsync(42, "value");
}
/**
 * Converts YAML user configuration into a {@code ShardingSphereUser}.
 * The "user" string is parsed into a grantee (username/hostname) first.
 *
 * @param yamlConfig YAML user configuration; may be null
 * @return the converted user, or null when the input is null
 */
@Override
public ShardingSphereUser swapToObject(final YamlUserConfiguration yamlConfig) {
    if (null == yamlConfig) {
        return null;
    }
    final Grantee grantee = convertYamlUserToGrantee(yamlConfig.getUser());
    final String username = grantee.getUsername();
    final String hostname = grantee.getHostname();
    return new ShardingSphereUser(
            username,
            yamlConfig.getPassword(),
            hostname,
            yamlConfig.getAuthenticationMethodName(),
            yamlConfig.isAdmin());
}
@Test
void assertSwapToObjectWithUserEndWithAt() {
    // A user string ending with '@' yields an empty hostname part, which the
    // grantee normalizes to the wildcard host "%".
    YamlUserConfiguration user = new YamlUserConfiguration();
    user.setUser("foo_user@");
    user.setPassword("foo_pwd");
    ShardingSphereUser actual = new YamlUserSwapper().swapToObject(user);
    assertNotNull(actual);
    assertThat(actual.getGrantee().getUsername(), is("foo_user"));
    assertThat(actual.getGrantee().getHostname(), is("%"));
    assertThat(actual.getPassword(), is("foo_pwd"));
}
/**
 * Enables automatic re-queueing: after leadership is relinquished, the selector
 * re-enters the election instead of stopping.
 */
public void autoRequeue() {
    autoRequeue.set(true);
}
@Test
public void testAutoRequeue() throws Exception {
    // With autoRequeue enabled, the listener must take leadership repeatedly
    // (at least twice) without any manual requeue() call.
    Timing timing = new Timing();
    LeaderSelector selector = null;
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString(server.getConnectString())
        .retryPolicy(new RetryOneTime(1))
        .sessionTimeoutMs(timing.session())
        .build();
    try {
        client.start();

        final Semaphore semaphore = new Semaphore(0);
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                // Hold leadership briefly, then release; autoRequeue should re-enter the election.
                Thread.sleep(10);
                semaphore.release();
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {}
        };
        selector = new LeaderSelector(client, "/leader", listener);
        selector.autoRequeue();
        selector.start();

        // Two permits => leadership was acquired twice.
        assertTrue(timing.acquireSemaphore(semaphore, 2));
    } finally {
        CloseableUtils.closeQuietly(selector);
        CloseableUtils.closeQuietly(client);
    }
}
/**
 * Describes why the server is unhealthy: empty when a raft leader exists,
 * otherwise a message pointing at the relevant logs.
 */
public Optional<String> getErrorMsg() {
    return hasLeader()
            ? Optional.empty()
            : Optional.of("No leader for raft group " + Constants.NAMING_PERSISTENT_SERVICE_GROUP
                    + ", please see logs `alipay-jraft.log` or `naming-raft.log` to see details.");
}
@Test
void testGetErrorMsg() {
    // With no raft leader available (mocked protocol manager), an error message is present.
    ServerStatusManager serverStatusManager = new ServerStatusManager(protocolManager, switchDomain);
    Optional<String> errorMsg = serverStatusManager.getErrorMsg();
    assertTrue(errorMsg.isPresent());
}
/**
 * Queries each broker's log directory descriptions, one request per broker.
 * Each broker's future completes independently with its directory map, or
 * exceptionally on error / empty response.
 */
@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
        futures.put(brokerId, future);

        // One call per broker, routed to that exact node.
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {
            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                if (!descriptions.isEmpty()) {
                    future.complete(descriptions);
                } else {
                    // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                    Errors error = response.data().errorCode() == Errors.NONE.code()
                        ? Errors.CLUSTER_AUTHORIZATION_FAILED
                        : Errors.forCode(response.data().errorCode());
                    future.completeExceptionally(error.exception());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
    }

    return new DescribeLogDirsResult(new HashMap<>(futures));
}
@Test
public void testDescribeLogDirsPartialFailure() throws Exception {
    // Broker 1 responds successfully while broker 0's request times out; each
    // broker's future must fail/succeed independently.
    long defaultApiTimeout = 60000;
    MockTime time = new MockTime();

    try (AdminClientUnitTestEnv env = mockClientEnv(time, AdminClientConfig.RETRIES_CONFIG, "0")) {
        // Only broker 1 has a prepared response.
        env.kafkaClient().prepareResponseFrom(
            prepareDescribeLogDirsResponse(Errors.NONE, "/data"),
            env.cluster().nodeById(1));

        DescribeLogDirsResult result = env.adminClient().describeLogDirs(asList(0, 1));

        // Wait until the prepared attempt has been consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
            "Failed awaiting requests");

        // Wait until the request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
            "Failed awaiting request");

        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);

        TestUtils.assertFutureThrows(result.descriptions().get(0), ApiException.class);
        assertNotNull(result.descriptions().get(1).get());
    }
}
/**
 * Emits a rate-limited info-level query log line. Queries dropped by the rate
 * limiter are counted and reported periodically via a second (dropped-log)
 * rate limiter. The query text is always appended last, truncated to
 * {@code _maxQueryLengthToLog}.
 */
public void log(QueryLogParams params) {
    _logger.debug("Broker Response: {}", params._response);

    // Force-logged queries bypass the rate limit; otherwise count the drop and bail.
    if (!(_logRateLimiter.tryAcquire() || shouldForceLog(params))) {
        _numDroppedLogs.incrementAndGet();
        return;
    }

    final StringBuilder queryLogBuilder = new StringBuilder();
    for (QueryLogEntry value : QUERY_LOG_ENTRY_VALUES) {
        value.format(queryLogBuilder, this, params);
        queryLogBuilder.append(',');
    }

    // always log the query last - don't add this to the QueryLogEntry enum
    queryLogBuilder.append("query=")
        .append(StringUtils.substring(params._requestContext.getQuery(), 0, _maxQueryLengthToLog));
    _logger.info(queryLogBuilder.toString());

    if (_droppedLogRateLimiter.tryAcquire()) {
        // use getAndSet to 0 so that there will be no race condition between
        // loggers that increment this counter and this thread
        long numDroppedLogsSinceLastLog = _numDroppedLogs.getAndSet(0);
        if (numDroppedLogsSinceLastLog > 0) {
            _logger.warn("{} logs were dropped. (log max rate per second: {})",
                numDroppedLogsSinceLastLog, _logRateLimiter.getRate());
        }
    }
}
@Test
public void shouldNotForceLog() {
    // Given: the rate limiter rejects and the params do not qualify for force-logging.
    Mockito.when(_logRateLimiter.tryAcquire()).thenReturn(false);
    QueryLogger.QueryLogParams params = generateParams(false, 0, 456);
    QueryLogger queryLogger = new QueryLogger(_logRateLimiter, 100, true, _logger, _droppedRateLimiter);

    // When:
    queryLogger.log(params);

    // Then: nothing is logged at info level.
    Assert.assertEquals(_infoLog.size(), 0);
}
/**
 * Builds the grouped service name {@code groupName + SERVICE_INFO_SPLITER + serviceName}.
 *
 * @param serviceName service name; must not be blank
 * @param groupName   group name; must not be blank
 * @return the interned grouped name (interned — presumably to share long-lived instances; confirm usage)
 * @throws IllegalArgumentException when either argument is blank
 */
public static String getGroupedName(final String serviceName, final String groupName) {
    if (StringUtils.isBlank(serviceName)) {
        throw new IllegalArgumentException("Param 'serviceName' is illegal, serviceName is blank");
    }
    if (StringUtils.isBlank(groupName)) {
        throw new IllegalArgumentException("Param 'groupName' is illegal, groupName is blank");
    }
    return (groupName + Constants.SERVICE_INFO_SPLITER + serviceName).intern();
}
@Test
void testGetGroupedNameWithoutServiceName() {
    // A blank service name must be rejected.
    assertThrows(IllegalArgumentException.class, () -> {
        NamingUtils.getGroupedName("", "group");
    });
}
/**
 * Reads the OpenAPI definition from the given resource class, delegating to the
 * full overload with the resolved application path and default arguments.
 */
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
@Test(description = "Filter class return type") public void testTicket3074() { Reader reader = new Reader(new OpenAPI()); OpenAPI oasResult = reader.read(RefParameter3074Resource.class); SerializationMatchers.assertEqualsToYaml(oasResult, RefParameter3074Resource.EXPECTED_YAML_WITH_WRAPPER); ModelConverters.getInstance().addClassToSkip("io.swagger.v3.jaxrs2.resources.RefParameter3074Resource$Wrapper"); reader = new Reader(new OpenAPI()); oasResult = reader.read(RefParameter3074Resource.class); SerializationMatchers.assertEqualsToYaml(oasResult, RefParameter3074Resource.EXPECTED_YAML_WITHOUT_WRAPPER); }
/**
 * Delegates to the active managed-instance service when one exists;
 * falls back to false (not managed) when none is configured.
 */
@Override
public boolean isUserManaged(DbSession dbSession, String userUuid) {
    return findManagedInstanceService()
        .map(managedInstanceService -> managedInstanceService.isUserManaged(dbSession, userUuid))
        .orElse(false);
}
@Test
public void isUserManaged_delegatesToRightService_andPropagateAnswer() {
    // With both a never-managed and an always-managed service registered, the delegating
    // facade must pick the active one and propagate its (true) answer.
    DelegatingManagedServices managedInstanceService =
        new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), new AlwaysManagedInstanceService()));

    assertThat(managedInstanceService.isUserManaged(dbSession, "whatever")).isTrue();
}
@POST @ApiOperation("Create a new view") @AuditEvent(type = ViewsAuditEventTypes.VIEW_CREATE) public ViewDTO create(@ApiParam @Valid @NotNull(message = "View is mandatory") ViewDTO dto, @Context UserContext userContext, @Context SearchUser searchUser) throws ValidationException { if (dto.type().equals(ViewDTO.Type.DASHBOARD) && !searchUser.canCreateDashboards()) { throw new ForbiddenException("User is not allowed to create new dashboards."); } validateIntegrity(dto, searchUser, true); final User user = userContext.getUser(); var result = dbService.saveWithOwner(dto.toBuilder().owner(searchUser.username()).build(), user); recentActivityService.create(result.id(), result.type().equals(ViewDTO.Type.DASHBOARD) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH, searchUser); return result; }
@Test
public void throwsExceptionWhenCreatingDashboardWithFilterThatUserIsNotAllowedToSee() {
    // A dashboard referencing a search filter invisible to the user must be
    // rejected with a BadRequestException naming the offending filter.
    final ViewsResource viewsResource = createViewsResource(
        mockViewService(TEST_DASHBOARD_VIEW),
        mock(StartPageService.class),
        mock(RecentActivityService.class),
        mock(ClusterEventBus.class),
        new ReferencedSearchFiltersHelper(),
        searchFilterVisibilityChecker(Collections.singletonList("<<You cannot see this filter>>")),
        EMPTY_VIEW_RESOLVERS,
        SEARCH
    );

    Assertions.assertThatThrownBy(() -> viewsResource.create(TEST_DASHBOARD_VIEW, mock(UserContext.class), SEARCH_USER))
        .isInstanceOf(BadRequestException.class)
        .hasMessageContaining("View cannot be saved, as it contains Search Filters which you are not privileged to view : [<<You cannot see this filter>>]");
}
/**
 * Builds the AuthRequest for the given social platform and user type. The default
 * configuration from application-*.yaml is used as the base; an enabled DB-side
 * client config (if any) overrides clientId/clientSecret (and agentId when set).
 */
@VisibleForTesting
AuthRequest buildAuthRequest(Integer socialType, Integer userType) {
    // 1. Look up the default configuration first, read from application-*.yaml
    AuthRequest request = authRequestFactory.get(SocialTypeEnum.valueOfType(socialType).getSource());
    Assert.notNull(request, String.format("社交平台(%d) 不存在", socialType));
    // 2. Query the DB-side configuration; when present (and enabled) it overrides the default
    SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(socialType, userType);
    if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
        // 2.1 Build a new AuthConfig object (copy of the current one, via reflection)
        AuthConfig authConfig = (AuthConfig) ReflectUtil.getFieldValue(request, "config");
        AuthConfig newAuthConfig = ReflectUtil.newInstance(authConfig.getClass());
        BeanUtil.copyProperties(authConfig, newAuthConfig);
        // 2.2 Override the clientId + clientSecret credentials
        newAuthConfig.setClientId(client.getClientId());
        newAuthConfig.setClientSecret(client.getClientSecret());
        if (client.getAgentId() != null) { // override agentId when one is configured
            newAuthConfig.setAgentId(client.getAgentId());
        }
        // 2.3 Set the new config back into the request for subsequent use
        ReflectUtil.setFieldValue(request, "config", newAuthConfig);
    }
    return request;
}
@Test
public void testBuildAuthRequest_clientEnable() {
    // Prepare arguments
    Integer socialType = SocialTypeEnum.WECHAT_MP.getType();
    Integer userType = randomPojo(SocialTypeEnum.class).getType();
    // Mock resolution of the corresponding AuthRequest implementation
    AuthConfig authConfig = mock(AuthConfig.class);
    AuthRequest authRequest = mock(AuthDefaultRequest.class);
    ReflectUtil.setFieldValue(authRequest, "config", authConfig);
    when(authRequestFactory.get(eq("WECHAT_MP"))).thenReturn(authRequest);
    // Mock data: an ENABLED DB-side client config for this social/user type
    SocialClientDO client = randomPojo(SocialClientDO.class,
        o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())
            .setUserType(userType).setSocialType(socialType));
    socialClientMapper.insert(client);

    // Invoke
    AuthRequest result = socialClientService.buildAuthRequest(socialType, userType);
    // Assert: same request instance, but its config was replaced by the DB override
    assertSame(authRequest, result);
    assertNotSame(authConfig, ReflectUtil.getFieldValue(authRequest, "config"));
}
/**
 * Builds a form-encoded POST request against the auth data's token server to
 * refresh the access token using the given app credentials.
 *
 * @param authData       current tokens plus the token server URI
 * @param appCredentials client id/secret used in the refresh grant
 * @return a request builder ready to be built and sent
 */
public static HttpRequest.Builder buildRefreshRequestUrlForAccessToken(
        TokensAndUrlAuthData authData, AppCredentials appCredentials) throws IllegalStateException {
    final URI tokenServerUri = authData.getTokenServerEncodedUri();
    final BodyPublisher refreshBody = buildRefreshRequestPostBody(authData, appCredentials);
    return HttpRequest.newBuilder()
        .uri(tokenServerUri)
        .header("content-type", "application/x-www-form-urlencoded")
        .POST(refreshBody);
}
@Test
public void test_buildRefreshRequestUrlForAccessToken() {
    //
    // Arrange
    //
    final String fakeAuthUrl = "https://www.example.com/auth";
    TokensAndUrlAuthData fakeAuthData =
        new TokensAndUrlAuthData("my_access_token", "my_refresh_token", fakeAuthUrl);
    AppCredentials mockAppCredentials = mock(AppCredentials.class);
    when(mockAppCredentials.getKey()).thenReturn("fake-client_id-contents-here");
    when(mockAppCredentials.getSecret()).thenReturn("fake-client_secert-contents-here");

    //
    // Act
    //
    HttpRequest actualRequest =
        TokenRefresher.buildRefreshRequestUrlForAccessToken(fakeAuthData, mockAppCredentials)
            .build();

    //
    // Assert: POST to the token server with the form-encoded refresh grant.
    //
    assertEquals(actualRequest.method(), "POST");
    assertEquals(actualRequest.uri().toString(), fakeAuthUrl);
    assertFalse(actualRequest.bodyPublisher().isEmpty());
    assertPublishes(
        actualRequest.bodyPublisher().get(),
        "refresh_token=my_refresh_token&grant_type=refresh_token&client_secret=fake-client_secert-contents-here&client_id=fake-client_id-contents-here");
}
/**
 * Satisfied when the first indicator's value is strictly below the second's
 * at the given index; the outcome is traced before returning.
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    final boolean firstBelowSecond = first.getValue(index).isLessThan(second.getValue(index));
    traceIsSatisfied(index, firstBelowSecond);
    return firstBelowSecond;
}
@Test
public void isSatisfied() {
    // The rule fires only at indices where the first series is below the second.
    assertTrue(rule.isSatisfied(0));
    assertFalse(rule.isSatisfied(1));
    assertFalse(rule.isSatisfied(2));
    assertFalse(rule.isSatisfied(3));
    assertTrue(rule.isSatisfied(4));
    assertFalse(rule.isSatisfied(5));
    assertFalse(rule.isSatisfied(6));
    assertFalse(rule.isSatisfied(7));
}
/**
 * Resolves the restart-backoff strategy factory by precedence:
 * explicit job-level strategy &gt; job configuration &gt; cluster configuration &gt; default.
 *
 * @param jobRestartStrategyConfiguration explicit job-level restart strategy (never null)
 * @param jobConfiguration                job-level configuration (never null)
 * @param clusterConfiguration            cluster-level configuration (never null)
 * @param isCheckpointingEnabled          whether checkpointing is on; selects the default strategy
 * @return the resolved factory
 */
public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory(
        final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration,
        final Configuration jobConfiguration,
        final Configuration clusterConfiguration,
        final boolean isCheckpointingEnabled) {

    checkNotNull(jobRestartStrategyConfiguration);
    checkNotNull(jobConfiguration);
    checkNotNull(clusterConfiguration);

    // orElseGet (not orElse) so lower-priority lookups are only evaluated when the
    // higher-priority ones are absent, instead of being computed eagerly every call.
    return getJobRestartStrategyFactory(jobRestartStrategyConfiguration)
            .orElseGet(
                    () ->
                            getRestartStrategyFactoryFromConfig(jobConfiguration)
                                    .orElseGet(
                                            () ->
                                                    getRestartStrategyFactoryFromConfig(
                                                                    clusterConfiguration)
                                                            .orElseGet(
                                                                    () ->
                                                                            getDefaultRestartStrategyFactory(
                                                                                    isCheckpointingEnabled))));
}
@Test
void testNoStrategySpecifiedWhenCheckpointingDisabled() {
    // With no strategy configured anywhere and checkpointing disabled,
    // the default resolves to the no-restart strategy.
    final RestartBackoffTimeStrategy.Factory factory =
        RestartBackoffTimeStrategyFactoryLoader.createRestartBackoffTimeStrategyFactory(
            DEFAULT_JOB_LEVEL_RESTART_CONFIGURATION,
            new Configuration(),
            new Configuration(),
            false);

    assertThat(factory)
        .isInstanceOf(
            NoRestartBackoffTimeStrategy.NoRestartBackoffTimeStrategyFactory.class);
}
/**
 * Builds the upsert partitioning plan from the workload profile: updates are
 * assigned to their existing file-group buckets first, then inserts are packed
 * into buckets (skipped for prepped write operations with no inserts).
 */
public UpsertPartitioner(WorkloadProfile profile, HoodieEngineContext context, HoodieTable table,
                         HoodieWriteConfig config, WriteOperationType operationType) {
    super(profile, table);
    updateLocationToBucket = new HashMap<>();
    partitionPathToInsertBucketInfos = new HashMap<>();
    bucketInfoMap = new HashMap<>();
    this.config = config;
    this.operationType = operationType;
    // Updates must be bucketed before inserts so insert packing sees the final bucket set.
    assignUpdates(profile);
    long totalInserts = profile.getInputPartitionPathStatMap().values().stream().mapToLong(stat -> stat.getNumInserts()).sum();
    if (!WriteOperationType.isPreppedWriteOperation(operationType) || totalInserts > 0) {
        // skip if its prepped write operation. or if totalInserts = 0.
        assignInserts(profile, context);
    }

    LOG.info("Total Buckets: {}, bucketInfoMap size: {}, partitionPathToInsertBucketInfos size: {}, updateLocationToBucket size: {}",
        totalBuckets, bucketInfoMap.size(), partitionPathToInsertBucketInfos.size(), updateLocationToBucket.size());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Buckets info => " + bucketInfoMap + ", \n"
            + "Partition to insert buckets => " + partitionPathToInsertBucketInfos + ", \n"
            + "UpdateLocations mapped to buckets =>" + updateLocationToBucket);
    }
}
@Test
public void testUpsertPartitioner() throws Exception {
    final String testPartitionPath = "2016/09/26";
    // Inserts + Updates... Check all updates go together & inserts subsplit
    UpsertPartitioner partitioner = getUpsertPartitioner(0, 200, 100, 1024, testPartitionPath, false);
    List<InsertBucketCumulativeWeightPair> insertBuckets = partitioner.getInsertBuckets(testPartitionPath);
    assertEquals(2, insertBuckets.size(), "Total of 2 insert buckets");
}