focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@VisibleForTesting static Row getRowConfig(ManagedConfig config, Schema transformSchema) { // May return an empty row (perhaps the underlying transform doesn't have any required // parameters) String yamlConfig = config.resolveUnderlyingConfig(); Map<String, Object> configMap = YamlUtils.yamlStringToMap(yamlConfig); // The config Row object will be used to build the underlying SchemaTransform. // If a mapping for the SchemaTransform exists, we use it to update parameter names and align // with the underlying config schema Map<String, String> mapping = MAPPINGS.get(config.getTransformIdentifier()); if (mapping != null && configMap != null) { Map<String, Object> remappedConfig = new HashMap<>(); for (Map.Entry<String, Object> entry : configMap.entrySet()) { String paramName = entry.getKey(); if (mapping.containsKey(paramName)) { paramName = mapping.get(paramName); } remappedConfig.put(paramName, entry.getValue()); } configMap = remappedConfig; } return YamlUtils.toBeamRow(configMap, transformSchema, false); }
/**
 * Verifies that getRowConfig parses a YAML config string into a Beam Row whose
 * fields match the test provider's schema, value for value.
 */
@Test
public void testGetConfigRowFromYamlString() {
    String yamlString = "extra_string: abc\n" + "extra_integer: 123";
    ManagedConfig config = ManagedConfig.builder()
            .setTransformIdentifier(TestSchemaTransformProvider.IDENTIFIER)
            .setConfig(yamlString)
            .build();
    // Expected row mirrors the two YAML fields above.
    Row expectedRow = Row.withSchema(TestSchemaTransformProvider.SCHEMA)
            .withFieldValue("extra_string", "abc")
            .withFieldValue("extra_integer", 123)
            .build();
    Row returnedRow = ManagedSchemaTransformProvider.getRowConfig(config, TestSchemaTransformProvider.SCHEMA);
    assertEquals(expectedRow, returnedRow);
}
/** Returns the request method stored on this object (an HTTP verb such as GET or POST). */
public String getMethod(){ return method; }
/**
 * Parses raw HTTP POST request text into a sampler under three content encodings
 * (unknown, UTF-8, ISO-8859-1) and verifies the detected method, encoding, and
 * decoded/encoded argument values each time.
 */
@Test
public void testPostRequestEncodings() throws Exception {
    String url = "http://localhost/matrix.html";
    // A HTTP POST request, with encoding not known
    String contentEncoding = "";
    String param1Value = "yes";
    String param2Value = "0+5 -\u00c5%C3%85";
    String param2ValueEncoded = URLEncoder.encode(param2Value,"UTF-8");
    String postBody = "param1=" + param1Value + "&param2=" + param2ValueEncoded + "\r\n";
    String testPostRequest = "POST " + url + " HTTP/1.1\r\n"
            + "Content-type: " + HTTPConstants.APPLICATION_X_WWW_FORM_URLENCODED + "\r\n"
            + "Content-length: " + getBodyLength(postBody, contentEncoding) + "\r\n"
            + "\r\n" + postBody;
    // Use null for url and contentEncoding, to simulate that HttpRequestHdr do not
    // know the encoding for the page
    HTTPSamplerBase s = getSamplerForRequest(null, testPostRequest, null);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals("UTF-8", s.getContentEncoding(), "Default content encoding is UTF-8");
    // Check arguments
    Arguments arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "param1", param1Value, param1Value, contentEncoding, false);
    // When the encoding is not known, we expect UTF-8 by default
    checkArgument((HTTPArgument)arguments.getArgument(1), "param2", param2Value, param2ValueEncoded, contentEncoding, true);

    // A HTTP POST request, with UTF-8 encoding
    contentEncoding = "UTF-8";
    param1Value = "yes";
    param2Value = "0+5 -|\u2aa1\u266a\u0153\u20a1\u0115\u0364\u00c5\u2052\uc385%C3%85";
    param2ValueEncoded = URLEncoder.encode(param2Value, contentEncoding);
    postBody = "param1=" + param1Value + "&param2=" + param2ValueEncoded + "\r\n";
    testPostRequest = "POST " + url + " HTTP/1.1\r\n"
            + "Content-type: " + HTTPConstants.APPLICATION_X_WWW_FORM_URLENCODED + "\r\n"
            + "Content-length: " + getBodyLength(postBody, contentEncoding) + "\r\n"
            + "\r\n" + postBody;
    s = getSamplerForRequest(url, testPostRequest, contentEncoding);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals(contentEncoding, s.getContentEncoding());
    // Check arguments
    arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "param1", param1Value, param1Value, contentEncoding, false);
    checkArgument((HTTPArgument)arguments.getArgument(1), "param2", param2Value, param2ValueEncoded, contentEncoding, true);

    // A HTTP POST request, with ISO-8859-1 encoding
    contentEncoding = "ISO-8859-1";
    param1Value = "yes";
    param2Value = "0+5 -\u00c5%C3%85";
    param2ValueEncoded = URLEncoder.encode(param2Value, contentEncoding);
    postBody = "param1=" + param1Value + "&param2=" + param2ValueEncoded + "\r\n";
    testPostRequest = "POST " + url + " HTTP/1.1\r\n"
            + "Content-type: " + HTTPConstants.APPLICATION_X_WWW_FORM_URLENCODED + "\r\n"
            + "Content-length: " + getBodyLength(postBody, contentEncoding) + "\r\n"
            + "\r\n" + postBody;
    s = getSamplerForRequest(url, testPostRequest, contentEncoding);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals(contentEncoding, s.getContentEncoding());
    // Check arguments
    arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "param1", param1Value, param1Value, contentEncoding, false);
    checkArgument((HTTPArgument)arguments.getArgument(1), "param2", param2Value, param2ValueEncoded, contentEncoding, true);
}
/**
 * Executes the underlying prepared statement via the ExecuteTemplate wrapper,
 * which presumably adds proxy/transaction handling around the raw call —
 * confirm against ExecuteTemplate's contract.
 *
 * @return the raw {@code Statement.execute()} result
 * @throws SQLException propagated from the underlying statement
 */
@Override
public boolean execute() throws SQLException {
    return ExecuteTemplate.execute(this, (statement, args) -> statement.execute());
}
/**
 * Smoke test: only verifies that execute() completes without throwing.
 * NOTE(review): no assertion on the boolean result or on delegation — consider
 * verifying the underlying statement was invoked.
 */
@Test
public void testExecute() throws SQLException {
    preparedStatementProxy.execute();
}
/**
 * Returns the substring of {@code str} between the first occurrence of
 * {@code open} and the first following occurrence of {@code close}.
 *
 * @param str the source string; null yields null
 * @param open the opening delimiter; null yields null
 * @param close the closing delimiter; null yields null
 * @return the text between the delimiters, or null if either delimiter is absent
 */
public static String substringBetween(String str, String open, String close) {
    // Null input or null delimiters always yield null.
    if (str == null || open == null || close == null) {
        return null;
    }
    final int openStart = str.indexOf(open);
    if (openStart == INDEX_NOT_FOUND) {
        return null;
    }
    final int contentStart = openStart + open.length();
    final int closeStart = str.indexOf(close, contentStart);
    return closeStart == INDEX_NOT_FOUND ? null : str.substring(contentStart, closeStart);
}
/**
 * Covers substringBetween: null arguments, missing delimiters, empty delimiters,
 * and the first-match behavior on repeated delimiter pairs.
 */
@Test
void testSubstringBetween() {
    // Any null argument yields null.
    assertNull(StringUtils.substringBetween(null, "a", "b"));
    assertNull(StringUtils.substringBetween("a", null, "b"));
    assertNull(StringUtils.substringBetween("a", "b", null));
    // Delimiters absent from the input yield null.
    assertNull(StringUtils.substringBetween(StringUtils.EMPTY, StringUtils.EMPTY, "]"));
    assertNull(StringUtils.substringBetween(StringUtils.EMPTY, "[", "]"));
    // Empty delimiters match immediately, producing an empty result.
    assertEquals(StringUtils.EMPTY, StringUtils.substringBetween("yabcz", StringUtils.EMPTY, StringUtils.EMPTY));
    assertEquals(StringUtils.EMPTY, StringUtils.substringBetween(StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY));
    assertEquals("b", StringUtils.substringBetween("wx[b]yz", "[", "]"));
    assertEquals("abc", StringUtils.substringBetween("yabcz", "y", "z"));
    // Only the first delimited span is returned.
    assertEquals("abc", StringUtils.substringBetween("yabczyabcz", "y", "z"));
}
/**
 * Registers the divide-upstream data handler as a Spring bean.
 *
 * @return a new DivideUpstreamDataHandler instance
 */
@Bean
public DivideUpstreamDataHandler divideUpstreamDataHandler() {
    return new DivideUpstreamDataHandler();
}
/** Verifies the divideUpstreamDataHandler bean is registered under its default name. */
@Test
public void testDivideUpstreamDataHandler() {
    applicationContextRunner.run(context -> {
            DivideUpstreamDataHandler handler = context.getBean("divideUpstreamDataHandler", DivideUpstreamDataHandler.class);
            assertNotNull(handler);
        }
    );
}
/**
 * Membership test for the boxed API: only {@link Long} values can be members;
 * anything else (including null) is reported absent.
 */
@Override
public boolean contains(final Object value) {
    if (!(value instanceof Long l)) {
        return false;
    }
    // Delegate to the primitive overload for the actual lookup.
    return contains(l.longValue());
}
/** A freshly constructed set must report every probed value (0..9999) as absent. */
@Test
public void initiallyContainsNoElements() {
    for (int i = 0; i < 10000; i++) {
        assertFalse(set.contains(i));
    }
}
/**
 * Renders the parameters as a URL query string ("?k=v&k2=v2", values URL-encoded
 * with this object's encoding). Returns the empty string when there are no
 * parameters. Synchronized because the parameter map may be mutated concurrently.
 *
 * @throws RuntimeException if the configured encoding is unsupported
 */
@Override
public synchronized String toString() {
    if (parameters.isEmpty()) {
        return "";
    }
    final StringBuilder out = new StringBuilder();
    // '?' before the first pair, '&' before every subsequent one.
    char separator = '?';
    for (final String key : parameters.keySet()) {
        for (final String value : parameters.get(key)) {
            out.append(separator);
            separator = '&';
            try {
                out.append(URLEncoder.encode(key, encoding))
                        .append('=')
                        .append(URLEncoder.encode(value, encoding));
            } catch (UnsupportedEncodingException e) {
                throw new RuntimeException(
                        "Cannot URL-encode query string (key=" + key + "; value=" + value + ").", e);
            }
        }
    }
    return out.toString();
}
/** A QueryString with no parameters must render as the empty string. */
@Test
public void testEmptyConstructor() {
    QueryString qs = new QueryString();
    Assert.assertEquals("", qs.toString());
}
/**
 * Builds the token that appends the generated-key column name to the INSERT
 * column list, anchored at the stop index of the existing insert-columns segment.
 *
 * @throws IllegalStateException if the statement has no generated key or no
 *         insert-columns segment (via Preconditions.checkState)
 */
@Override
public GeneratedKeyInsertColumnToken generateSQLToken(final InsertStatementContext insertStatementContext) {
    Optional<GeneratedKeyContext> generatedKey = insertStatementContext.getGeneratedKeyContext();
    Preconditions.checkState(generatedKey.isPresent());
    Optional<InsertColumnsSegment> sqlSegment = insertStatementContext.getSqlStatement().getInsertColumns();
    Preconditions.checkState(sqlSegment.isPresent());
    return new GeneratedKeyInsertColumnToken(sqlSegment.get().getStopIndex(), generatedKey.get().getColumnName());
}
/**
 * Verifies the generated token renders as ", COLUMN_NAME" for a mocked insert
 * statement that carries a generated-key context and an insert-columns segment.
 */
@Test
void assertGenerateSQLToken() {
    GeneratedKeyContext generatedKeyContext = mock(GeneratedKeyContext.class);
    final String testColumnName = "TEST_COLUMN_NAME";
    when(generatedKeyContext.getColumnName()).thenReturn(testColumnName);
    InsertStatementContext insertStatementContext = mock(InsertStatementContext.class, RETURNS_DEEP_STUBS);
    when(insertStatementContext.getGeneratedKeyContext()).thenReturn(Optional.of(generatedKeyContext));
    InsertColumnsSegment insertColumnsSegment = mock(InsertColumnsSegment.class);
    final int testStopIndex = 4;
    when(insertColumnsSegment.getStopIndex()).thenReturn(testStopIndex);
    when(insertStatementContext.getSqlStatement().getInsertColumns()).thenReturn(Optional.of(insertColumnsSegment));
    GeneratedKeyInsertColumnTokenGenerator generatedKeyInsertColumnTokenGenerator = new GeneratedKeyInsertColumnTokenGenerator();
    assertThat(generatedKeyInsertColumnTokenGenerator.generateSQLToken(insertStatementContext).toString(), is(", " + testColumnName));
}
/**
 * Converts a scanner-report measure into a core {@code Measure}, dispatching on
 * the metric's value type.
 *
 * @param batchMeasure the raw report measure; null yields {@code Optional.empty()}
 * @param metric the metric whose value type selects the conversion; must not be null
 * @return the converted measure, or empty when input is null
 * @throws IllegalArgumentException for a value type with no conversion
 */
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
    Objects.requireNonNull(metric);
    if (batchMeasure == null) {
        return Optional.empty();
    }
    Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(builder, batchMeasure);
        case LONG:
            return toLongMeasure(builder, batchMeasure);
        case DOUBLE:
            return toDoubleMeasure(builder, batchMeasure);
        case BOOLEAN:
            return toBooleanMeasure(builder, batchMeasure);
        case STRING:
            return toStringMeasure(builder, batchMeasure);
        case LEVEL:
            return toLevelMeasure(builder, batchMeasure);
        case NO_VALUE:
            // No payload to read; builder alone produces the measure.
            return toNoValueMeasure(builder);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
/** A STRING metric's report value must surface as both getStringValue() and getData(). */
@Test
public void toMeasure_maps_alert_properties_in_dto_for_String_Metric() {
    ScannerReport.Measure batchMeasure = ScannerReport.Measure.newBuilder()
            .setStringValue(StringValue.newBuilder().setValue(SOME_DATA))
            .build();
    Optional<Measure> measure = underTest.toMeasure(batchMeasure, SOME_STRING_METRIC);
    assertThat(measure).isPresent();
    assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.STRING);
    assertThat(measure.get().getStringValue()).isEqualTo(SOME_DATA);
    assertThat(measure.get().getData()).isEqualTo(SOME_DATA);
}
/**
 * Extracts primary-key values from the given records, keyed by PK column name.
 * Convenience overload that pulls rows and PK column names from the records' metadata.
 */
protected Map<String, List<Field>> parsePkValues(TableRecords records) {
    return parsePkValues(records.getRows(), records.getTableMeta().getPrimaryKeyOnlyName());
}
/**
 * Builds a two-row before-image with PK column "id" and verifies parsePkValues
 * collects both id fields (and ignores the non-PK "age" column).
 */
@Test
public void testParsePK() {
    TableMeta tableMeta = Mockito.mock(TableMeta.class);
    Mockito.when(tableMeta.getPrimaryKeyOnlyName()).thenReturn(Collections.singletonList("id"));
    Mockito.when(tableMeta.getTableName()).thenReturn("table_name");
    TableRecords beforeImage = new TableRecords();
    beforeImage.setTableName("table_name");
    beforeImage.setTableMeta(tableMeta);
    List<Row> beforeRows = new ArrayList<>();
    Row row0 = new Row();
    addField(row0, "id", 1, "12345");
    addField(row0, "age", 1, "2");
    beforeRows.add(row0);
    Row row1 = new Row();
    addField(row1, "id", 1, "12346");
    addField(row1, "age", 1, "2");
    beforeRows.add(row1);
    beforeImage.setRows(beforeRows);
    SQLUndoLog sqlUndoLog = new SQLUndoLog();
    sqlUndoLog.setSqlType(SQLType.UPDATE);
    sqlUndoLog.setTableMeta(tableMeta);
    sqlUndoLog.setTableName("table_name");
    sqlUndoLog.setBeforeImage(beforeImage);
    sqlUndoLog.setAfterImage(null);
    TestUndoExecutor executor = new TestUndoExecutor(sqlUndoLog, true);
    Map<String,List<Field>> pkValues = executor.parsePkValues(beforeImage);
    // One entry per row for the single PK column.
    Assertions.assertEquals(2, pkValues.get("id").size());
}
/**
 * Re-reads the include/exclude host files into a staged ("lazy") view; the
 * active view is swapped in later (see the finishRefresh usage in the tests).
 *
 * @throws IOException if either file cannot be read
 */
public void lazyRefresh(String includesFile, String excludesFile) throws IOException {
    refreshInternal(includesFile, excludesFile, true);
}
/**
 * lazyRefresh must stage the newly-read host lists without touching the active
 * view; finishRefresh then promotes the staged view and clears it.
 */
@Test
public void testLazyRefresh() throws IOException {
    FileWriter efw = new FileWriter(excludesFile);
    FileWriter ifw = new FileWriter(includesFile);
    efw.write("host1\n");
    efw.write("host2\n");
    efw.close();
    ifw.write("host3\n");
    ifw.write("host4\n");
    ifw.close();
    HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
    // Empty the includes file and append two more excluded hosts.
    ifw = new FileWriter(includesFile);
    ifw.close();
    efw = new FileWriter(excludesFile, true);
    efw.write("host3\n");
    efw.write("host4\n");
    efw.close();
    hfp.lazyRefresh(includesFile, excludesFile);
    HostDetails details = hfp.getHostDetails();
    HostDetails lazyDetails = hfp.getLazyLoadedHostDetails();
    // Active view still reflects the original files.
    assertEquals("Details: no. of excluded hosts", 2, details.getExcludedHosts().size());
    assertEquals("Details: no. of included hosts", 2, details.getIncludedHosts().size());
    // Staged view reflects the rewritten files.
    assertEquals("LazyDetails: no. of excluded hosts", 4, lazyDetails.getExcludedHosts().size());
    assertEquals("LayDetails: no. of included hosts", 0, lazyDetails.getIncludedHosts().size());
    hfp.finishRefresh();
    details = hfp.getHostDetails();
    assertEquals("Details: no. of excluded hosts", 4, details.getExcludedHosts().size());
    assertEquals("Details: no. of included hosts", 0, details.getIncludedHosts().size());
    assertNull("Lazy host details should be null", hfp.getLazyLoadedHostDetails());
}
/**
 * Stores the rendered key/value pair (with optional TTL metadata) into the
 * rendered namespace's KV store, honoring the overwrite flag.
 *
 * @return always null (no task output)
 */
@Override
public VoidOutput run(RunContext runContext) throws Exception {
    // Render templated properties against the run context before storing.
    final String ns = runContext.render(this.namespace);
    final String k = runContext.render(this.key);
    final Object val = runContext.renderTyped(this.value);

    runContext.namespaceKv(ns).put(k, new KVValueAndMetadata(new KVMetadata(ttl), val), this.overwrite);
    return null;
}
/**
 * Running Set with templated key/value must store the rendered pair in the
 * flow's namespace KV store, with no expiration when no TTL is configured.
 */
@Test
void shouldSetKVGivenSameNamespace() throws Exception {
    // Given
    RunContext runContext = this.runContextFactory.of(Map.of(
        "flow", Map.of("namespace", "io.kestra.test"),
        "inputs", Map.of(
            "key", TEST_KEY,
            "value", "test-value"
        )
    ));
    Set set = Set.builder()
        .id(Set.class.getSimpleName())
        .type(Set.class.getName())
        .key("{{ inputs.key }}")
        .value("{{ inputs.value }}")
        .namespace("io.kestra.test")
        .build();
    // When
    set.run(runContext);
    // Then
    final KVStore kv = runContext.namespaceKv("io.kestra.test");
    assertThat(kv.getValue(TEST_KEY), is(Optional.of(new KVValue("test-value"))));
    assertThat(kv.list().getFirst().expirationDate(), nullValue());
}
/**
 * Chat-message hook: parses game/spam messages about charged items (dodgy
 * necklace, binding necklace, ring of forging, amulets, chronicle, slayer
 * bracelets, blood essence, bracelet of clay), updates the tracked charge
 * counts, and fires break notifications. The if/else chain is order-dependent:
 * break messages are handled before check/use messages so counters resync.
 */
@Subscribe
public void onChatMessage(ChatMessage event) {
    if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM) {
        String message = Text.removeTags(event.getMessage());
        // One matcher per recognized message shape; only one branch below fires.
        Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
        Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
        Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
        Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
        Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
        Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
        Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
        Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
        Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
        Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
        Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
        Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
        Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
        Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
        Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
        if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE)) {
            notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
        }
        else if (dodgyBreakMatcher.find()) {
            notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
            updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
        }
        else if (dodgyCheckMatcher.find()) {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
        }
        else if (dodgyProtectMatcher.find()) {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
        }
        else if (amuletOfChemistryCheckMatcher.find()) {
            updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
        }
        else if (amuletOfChemistryUsedMatcher.find()) {
            // "one" is spelled out in the game message; otherwise a number is given.
            final String match = amuletOfChemistryUsedMatcher.group(1);
            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateAmuletOfChemistryCharges(charges);
        }
        else if (amuletOfChemistryBreakMatcher.find()) {
            notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
            updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
        }
        else if (amuletOfBountyCheckMatcher.find()) {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
        }
        else if (amuletOfBountyUsedMatcher.find()) {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
        }
        else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT)) {
            updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
        }
        else if (message.contains(BINDING_BREAK_TEXT)) {
            notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
        }
        else if (bindingNecklaceUsedMatcher.find()) {
            // NOTE(review): equipment is not null-checked here, unlike the ring-of-forging
            // branch below — confirm getItemContainer cannot return null on this path.
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
            if (equipment.contains(ItemID.BINDING_NECKLACE)) {
                updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
            }
        }
        else if (bindingNecklaceCheckMatcher.find()) {
            final String match = bindingNecklaceCheckMatcher.group(1);
            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateBindingNecklaceCharges(charges);
        }
        else if (ringOfForgingCheckMatcher.find()) {
            final String match = ringOfForgingCheckMatcher.group(1);
            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateRingOfForgingCharges(charges);
        }
        else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY)) {
            final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
            // Determine if the player smelted with a Ring of Forging equipped.
            if (equipment == null) {
                return;
            }
            // NOTE(review): inventory may be null here while equipment is not — confirm
            // the Varrock-platebody path cannot hit inventory.count with a null inventory.
            if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1)) {
                int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
                updateRingOfForgingCharges(charges);
            }
        }
        else if (message.equals(RING_OF_FORGING_BREAK_TEXT)) {
            notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
        }
        else if (chronicleAddMatcher.find()) {
            final String match = chronicleAddMatcher.group(1);
            if (match.equals("one")) {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
            } else {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
            }
        }
        else if (chronicleUseAndCheckMatcher.find()) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
        }
        else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
        }
        else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
        }
        else if (message.equals(CHRONICLE_FULL_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
        }
        else if (slaughterActivateMatcher.find()) {
            // A null capture group means the bracelet broke (no remaining-charge number).
            final String found = slaughterActivateMatcher.group(1);
            if (found == null) {
                updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
            } else {
                updateBraceletOfSlaughterCharges(Integer.parseInt(found));
            }
        }
        else if (slaughterCheckMatcher.find()) {
            updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
        }
        else if (expeditiousActivateMatcher.find()) {
            final String found = expeditiousActivateMatcher.group(1);
            if (found == null) {
                updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
            } else {
                updateExpeditiousBraceletCharges(Integer.parseInt(found));
            }
        }
        else if (expeditiousCheckMatcher.find()) {
            updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
        }
        else if (bloodEssenceCheckMatcher.find()) {
            updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
        }
        else if (bloodEssenceExtractMatcher.find()) {
            updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
        }
        else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT)) {
            updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
        }
        else if (braceletOfClayCheckMatcher.find()) {
            updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
        }
        else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN)) {
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
            // Determine if the player mined with a Bracelet of Clay equipped.
            if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY)) {
                final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
                // Charge is not used if only 1 inventory slot is available when mining in Prifddinas
                boolean ignore = inventory != null
                    && inventory.count() == 27
                    && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
                if (!ignore) {
                    int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
                    updateBraceletOfClayCharges(charges);
                }
            }
        }
        else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT)) {
            notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
            updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
        }
    }
}
/** Activating a blood essence must reset its tracked charges to the maximum (1000). */
@Test
public void testBloodEssenceActivate() {
    ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", ACTIVATE_BLOOD_ESSENCE, "", 0);
    itemChargePlugin.onChatMessage(chatMessage);
    verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_BLOOD_ESSENCE, 1000);
}
/**
 * Decides whether the track is "established at altitude" at {@code queryTime}:
 * the trailing window [queryTime - duration, queryTime] must contain at least
 * two points whose altitudes all fall within 100 feet of each other.
 *
 * @return true when the window has &gt;= 2 points with altitude spread &lt;= 100 ft
 */
public static boolean isEstablishedAtAltitude(Track track, Instant queryTime, Duration duration) {
    TimeWindow window = TimeWindow.of(queryTime.minus(duration), queryTime);
    NavigableSet<Point> pointsInWindow = (NavigableSet<Point>) track.subset(window);
    if (pointsInWindow.isEmpty()) {
        return false;
    }
    List<Distance> altitudes = pointsInWindow
        .stream()
        .map(Point::altitude)
        .collect(toList());
    // Altitude spread across the window.
    Distance spread = max(altitudes).minus(min(altitudes));
    return pointsInWindow.size() >= 2 && spread.isLessThanOrEqualTo(Distance.ofFeet(100));
}
/**
 * Walks a recorded NOP track through several query times and window lengths,
 * checking the 2-point / 100-foot-spread rule at the moment the aircraft levels
 * off at 8,000 ft and later when it drifts to 7,900 ft.
 */
@Test
public void isEstablishedAtAltitude_providesCorrectAnswer() {
    Track track = makeTrackFromNopData(getResourceFile("Track2.txt"));
    Duration fiveSeconds = Duration.ofSeconds(5);
    // At track start there is no trailing data, so never established.
    assertThat(
        isEstablishedAtAltitude(track, track.startTime(), fiveSeconds),
        is(false)
    );
    //first time at 8,000ft
    Instant timeAt8000 = parseNopTime("07/08/2017", "14:23:13.220");
    //when you JUST hit 8,000ft you ARE established if you require 31 seconds (because you'll only have 7900 and 8000 foot altitudes)
    assertThat(
        isEstablishedAtAltitude(track, timeAt8000, Duration.ofSeconds(31)),
        is(true)
    );
    //when you JUST hit 8,000ft you ARE NOT established if you require 32 seconds (because you'll have 7800, 7900, and 8000 foot altitudes)
    assertThat(
        isEstablishedAtAltitude(track, timeAt8000, Duration.ofSeconds(32)),
        is(false)
    );
    //2nd point at 8,000ft
    Instant timeAt8000_2 = parseNopTime("07/08/2017", "14:23:17.249");
    //you are established at an altitude after the 2nd point
    assertThat(
        isEstablishedAtAltitude(track, timeAt8000_2, fiveSeconds),
        is(true)
    );
    //another point at 8,000ft
    Instant timeAt8000_4 = parseNopTime("07/08/2017", "14:23:24.147");
    assertThat(
        isEstablishedAtAltitude(track, timeAt8000_4, fiveSeconds),
        is(true)
    );
    //you can make the test above fail by requiring a long duration at the same altitude
    assertThat(
        isEstablishedAtAltitude(track, timeAt8000_4, Duration.ofMinutes(10)),
        is(false)
    );
    Instant timeAt7900 = parseNopTime("07/08/2017", "14:36:40.981");
    //you'll still be established at the altitude even though dropped from 8,000ft to 7,900ft.
    assertThat(
        isEstablishedAtAltitude(track, timeAt7900, Duration.ofSeconds(30)),
        is(true)
    );
}
/**
 * Quarkus build step: assembles the JobRunr CDI beans — producer, starter, the
 * JSON mapper matching the available capability, and the configured storage
 * provider beans — into a single unremovable AdditionalBeanBuildItem.
 */
@BuildStep
AdditionalBeanBuildItem produce(Capabilities capabilities, JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) {
    Set<Class<?>> beans = new HashSet<>();
    beans.add(JobRunrProducer.class);
    beans.add(JobRunrStarter.class);
    // The JSON mapper bean depends on which JSON capability (e.g. JSON-B/Jackson) is present.
    beans.add(jsonMapper(capabilities));
    beans.addAll(storageProvider(capabilities, jobRunrBuildTimeConfiguration));
    return AdditionalBeanBuildItem.builder()
        .setUnremovable()
        .addBeanClasses(beans.toArray(new Class[0]))
        .build();
}
/** With the JSONB capability present, the produced beans must include the JSON-B mapper producer. */
@Test
void jobRunrProducerUsesJSONBIfCapabilityPresent() {
    Mockito.reset(capabilities);
    lenient().when(capabilities.isPresent(Capability.JSONB)).thenReturn(true);
    final AdditionalBeanBuildItem additionalBeanBuildItem = jobRunrExtensionProcessor.produce(capabilities, jobRunrBuildTimeConfiguration);
    assertThat(additionalBeanBuildItem.getBeanClasses())
        .contains(JobRunrProducer.JobRunrJsonBJsonMapperProducer.class.getName());
}
/**
 * Constant-folding implementation of bitxor for LARGEINT operands:
 * returns first XOR second as a new large-int constant.
 */
@ConstantFunction(name = "bitxor", argTypes = {LARGEINT, LARGEINT}, returnType = LARGEINT)
public static ConstantOperator bitxorLargeInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createLargeInt(first.getLargeInt().xor(second.getLargeInt()));
}
/** x XOR x must be zero: 100 ^ 100 == 0. */
@Test
public void bitxorLargeInt() {
    assertEquals("0", ScalarOperatorFunctions.bitxorLargeInt(O_LI_100, O_LI_100).getLargeInt().toString());
}
/**
 * Resolves the client IP address from the exchange. The paramName argument is
 * intentionally unused — the host parameter needs no qualifier.
 */
@Override
public String builder(final String paramName, final ServerWebExchange exchange) {
    return HostAddressUtils.acquireIp(exchange);
}
/** builder must ignore paramName entirely, so a null paramName still yields the exchange's IP. */
@Test
public void testBuilderWithNullParamName() {
    assertEquals(testHost, ipParameterData.builder(null, exchange));
}
/** Serializes this object as compact JSON (indent factor 0). */
@Override
public String toString() {
    return this.toJSONString(0);
}
/** Parsing a single Map.Entry must produce a one-field JSON object. */
@Test
public void setEntryTest() {
    final HashMap<String, String> of = MapUtil.of("test", "testValue");
    final Set<Map.Entry<String, String>> entries = of.entrySet();
    final Map.Entry<String, String> next = entries.iterator().next();
    final JSONObject jsonObject = JSONUtil.parseObj(next);
    assertEquals("{\"test\":\"testValue\"}", jsonObject.toString());
}
/**
 * Reports whether the event's logger is trusted: true when its name starts
 * with any prefix in SAFE_LOGGERS, false otherwise.
 */
protected boolean isLoggerSafe(ILoggingEvent event) {
    final String loggerName = event.getLoggerName();
    for (final String safePrefix : SAFE_LOGGERS) {
        if (loggerName.startsWith(safePrefix)) {
            return true;
        }
    }
    return false;
}
/** A logger under a whitelisted package prefix must be classified as safe. */
@Test
void isLoggerSafeShouldReturnTrueWhenLoggerNameStartsWithSafeLogger() {
    ILoggingEvent event = mock(ILoggingEvent.class);
    when(event.getLoggerName()).thenReturn("org.springframework.boot.autoconfigure.example.Logger");
    CRLFLogConverter converter = new CRLFLogConverter();
    boolean result = converter.isLoggerSafe(event);
    assertTrue(result);
}
/**
 * Parses a gallery page URL in strict mode (the default).
 *
 * @return the parsed result, or null if the URL is not recognized
 */
@Nullable
public static Result parse(String url) {
    return parse(url, true);
}
/**
 * Parameterized check: parsing {@code url} with {@code strict} either yields
 * null (when expected) or a result matching the expected gid/pToken/page.
 */
@Test
public void testParse() {
    GalleryPageUrlParser.Result result = GalleryPageUrlParser.parse(url, strict);
    if (isNull) {
        assertNull(result);
    } else {
        assertEquals(gid, result.gid);
        assertEquals(pToken, result.pToken);
        assertEquals(page, result.page);
    }
}
/**
 * One-time migration ensuring a default index-set template exists. When a
 * legacy IndexSetsDefaultConfiguration is present it is converted into the new
 * template and then removed; otherwise a fresh default is created. Idempotent:
 * does nothing if a default template already exists.
 */
@Override
public void upgrade() {
    Optional<IndexSetTemplate> defaultIndexSetTemplate = indexSetDefaultTemplateService.getDefaultIndexSetTemplate();
    if (defaultIndexSetTemplate.isEmpty()) {
        IndexSetsDefaultConfiguration legacyDefaultConfig = clusterConfigService.get(IndexSetsDefaultConfiguration.class);
        if (legacyDefaultConfig == null) {
            saveDefaultTemplate(factory.create());
        } else {
            // Convert the legacy cluster config, then delete it so the migration runs once.
            saveDefaultTemplate(createTemplateConfig(legacyDefaultConfig));
            removeLegacyConfig();
        }
    } else {
        LOG.debug("Migration already completed.");
    }
}
/**
 * With no default template stored, upgrade() must create and save a default
 * built from the factory configuration (legacy rotation disabled).
 */
@Test
void testNoDefaultTemplateAndLegacyConfigExists() {
    mockElasticConfig();
    IndexSetTemplateConfig defaultConfiguration = defaultConfigurationFactory.create();
    underTest.upgrade();
    verify(indexSetDefaultTemplateService).createAndSaveDefault(createTemplate(defaultConfiguration));
    assertThat(defaultConfiguration.useLegacyRotation()).isFalse();
}
/**
 * Equality is based on kind, name, and namespace only — the trigger source
 * (e.g. watch vs. timer) is deliberately excluded, as the tests confirm.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    SimplifiedReconciliation other = (SimplifiedReconciliation) o;
    return this.kind.equals(other.kind)
            && this.name.equals(other.name)
            && this.namespace.equals(other.namespace);
}
/**
 * Equality must ignore the trigger argument (watch vs. timer) but distinguish
 * reconciliations with different names.
 */
@Test
public void testEquals() {
    SimplifiedReconciliation r1 = new SimplifiedReconciliation("kind", "my-namespace", "my-name", "watch");
    SimplifiedReconciliation r2 = new SimplifiedReconciliation("kind", "my-namespace", "my-name", "timer");
    SimplifiedReconciliation r3 = new SimplifiedReconciliation("kind", "my-namespace", "my-other-name", "watch");
    assertThat(r1.equals(r2), is(true));
    assertThat(r2.equals(r1), is(true));
    assertThat(r1.equals(r3), is(false));
}
boolean shouldRetry(GetQueryExecutionResponse getQueryExecutionResponse) { String stateChangeReason = getQueryExecutionResponse.queryExecution().status().stateChangeReason(); if (this.retry.contains("never")) { LOG.trace("AWS Athena start query execution detected error ({}), marked as not retryable", stateChangeReason); return false; } if (this.retry.contains("always")) { LOG.trace("AWS Athena start query execution detected error ({}), marked as retryable", stateChangeReason); return true; } // Generic errors happen sometimes in Athena. It's possible that a retry will fix the problem. if (stateChangeReason != null && stateChangeReason.contains("GENERIC_INTERNAL_ERROR") && (this.retry.contains("generic") || this.retry.contains("retryable"))) { LOG.trace("AWS Athena start query execution detected generic error ({}), marked as retryable", stateChangeReason); return true; } // Resource exhaustion happens sometimes in Athena. It's possible that a retry will fix the problem. if (stateChangeReason != null && stateChangeReason.contains("exhausted resources at this scale factor") && (this.retry.contains("exhausted") || this.retry.contains("retryable"))) { LOG.trace("AWS Athena start query execution detected resource exhaustion error ({}), marked as retryable", stateChangeReason); return true; } return false; }
/** The "retryable" policy must treat a GENERIC_INTERNAL_ERROR failure as retryable. */
@Test
public void shouldRetryReturnsTrueForGenericInternalError() {
    Athena2QueryHelper helper = athena2QueryHelperWithRetry("retryable");
    assertTrue(helper.shouldRetry(newGetQueryExecutionResponse(QueryExecutionState.FAILED, "GENERIC_INTERNAL_ERROR")));
}
public static boolean anyUnSet(MemoryBuffer bitmapBuffer, int baseOffset, int valueCount) { final int sizeInBytes = (valueCount + 7) / 8; // If value count is not a multiple of 8, then calculate number of used bits in the last byte final int remainder = valueCount % 8; final int sizeInBytesMinus1 = sizeInBytes - 1; int bytesMinus1EndOffset = baseOffset + sizeInBytesMinus1; for (int i = baseOffset; i < bytesMinus1EndOffset; i++) { if (bitmapBuffer.getByte(i) != (byte) 0xFF) { return true; } } // handling with the last byte // since unsafe putLong use native byte order, maybe not big endian, // see java.nio.DirectByteBuffer.putLong(long, long), we can't use unsafe.putLong // for bit operations, native byte order may be subject to change between machine, // so we use getByte if (remainder != 0) { byte byteValue = bitmapBuffer.getByte(baseOffset + sizeInBytesMinus1); // Every byte is set form right to left byte mask = (byte) (0xFF >>> (8 - remainder)); return byteValue != mask; } return false; }
// Sets all 10 bits of a 10-bit bitmap and verifies anyUnSet reports none unset.
@Test
public void anyUnSet() {
    int valueCount = 10;
    MemoryBuffer buffer = MemoryUtils.buffer(valueCount);
    int i = 0;
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    BitUtils.set(buffer, 0, i++);
    assertFalse(BitUtils.anyUnSet(buffer, 0, valueCount));
    // NOTE(review): return value is unused — presumably this only checks that hex
    // encoding the buffer does not throw; consider asserting on it or removing it.
    StringUtils.encodeHexString(buffer.getRemainingBytes());
}
/**
 * Validates that a Hive materialized view's partitioning is compatible with its base tables:
 * the view must have at least one directly-mapped column, must itself be partitioned, and
 * must share at least one partition column with every base table (with an extra constraint
 * for base tables on the outer side of an outer join).
 *
 * @throws PrestoException with NOT_SUPPORTED when any of the above conditions fails
 * @throws TableNotFoundException when a referenced base table does not exist
 */
public static void validateMaterializedViewPartitionColumns(
        SemiTransactionalHiveMetastore metastore,
        MetastoreContext metastoreContext,
        Table viewTable,
        MaterializedViewDefinition viewDefinition) {
    SchemaTableName viewName = new SchemaTableName(viewTable.getDatabaseName(), viewTable.getTableName());
    // view column -> (base table -> base column) for columns defined directly by a base column
    Map<String, Map<SchemaTableName, String>> viewToBaseDirectColumnMap = viewDefinition.getDirectColumnMappingsAsMap();
    if (viewToBaseDirectColumnMap.isEmpty()) {
        throw new PrestoException(
                NOT_SUPPORTED,
                format("Materialized view %s must have at least one column directly defined by a base table column.", viewName));
    }
    List<Column> viewPartitions = viewTable.getPartitionColumns();
    if (viewPartitions.isEmpty()) {
        throw new PrestoException(NOT_SUPPORTED, "Unpartitioned materialized view is not supported.");
    }
    // Resolve every base table through the metastore; missing tables are an error.
    List<Table> baseTables = viewDefinition.getBaseTables().stream()
            .map(baseTableName -> metastore.getTable(metastoreContext, baseTableName.getSchemaName(), baseTableName.getTableName())
                    .orElseThrow(() -> new TableNotFoundException(baseTableName)))
            .collect(toImmutableList());
    Map<Table, List<Column>> baseTablePartitions = baseTables.stream()
            .collect(toImmutableMap(
                    table -> table,
                    Table::getPartitionColumns));
    for (Table baseTable : baseTablePartitions.keySet()) {
        SchemaTableName schemaBaseTable = new SchemaTableName(baseTable.getDatabaseName(), baseTable.getTableName());
        // Each base table must share a partition column with the view.
        if (!isCommonPartitionFound(schemaBaseTable, baseTablePartitions.get(baseTable), viewPartitions, viewToBaseDirectColumnMap)) {
            throw new PrestoException(
                    NOT_SUPPORTED,
                    format("Materialized view %s must have at least one partition column that exists in %s as well", viewName, baseTable.getTableName()));
        }
        // Outer-join base tables additionally need an indirect partition mapping.
        if (viewDefinition.getBaseTablesOnOuterJoinSide().contains(schemaBaseTable)
                && viewToBaseTableOnOuterJoinSideIndirectMappedPartitions(viewDefinition, baseTable).get().isEmpty()) {
            throw new PrestoException(
                    NOT_SUPPORTED,
                    format("Outer join conditions in Materialized view %s must have at least one common partition equality constraint", viewName));
        }
    }
}
// An empty direct-column mapping must be rejected with NOT_SUPPORTED.
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Materialized view schema.table must have at least one column directly defined by a base table column.")
public void testValidateMaterializedViewPartitionColumnsEmptyBaseColumnMap() {
    TestingSemiTransactionalHiveMetastore testMetastore = TestingSemiTransactionalHiveMetastore.create();
    Column dsColumn = new Column("ds", HIVE_STRING, Optional.empty(), Optional.empty());
    Column shipmodeColumn = new Column("shipmode", HIVE_STRING, Optional.empty(), Optional.empty());
    List<Column> partitionColumns = ImmutableList.of(dsColumn, shipmodeColumn);
    SchemaTableName tableName = new SchemaTableName(SCHEMA_NAME, TABLE_NAME);
    // Deliberately empty: the condition under test.
    Map<String, Map<SchemaTableName, String>> originalColumnMapping = ImmutableMap.of();
    testMetastore.addTable(SCHEMA_NAME, TABLE_NAME, getTable(partitionColumns), ImmutableList.of());
    List<Column> viewPartitionColumns = ImmutableList.of(dsColumn);
    validateMaterializedViewPartitionColumns(testMetastore, metastoreContext, getTable(viewPartitionColumns), getConnectorMaterializedViewDefinition(ImmutableList.of(tableName), originalColumnMapping));
}
/**
 * Returns the given identifier as a single-quoted SQL string literal, escaping
 * backslashes and single quotes with a preceding backslash.
 */
public static String escapeString(String identifier) {
    StringBuilder quoted = new StringBuilder(identifier.length() + 2);
    quoted.append('\'');
    for (int i = 0; i < identifier.length(); i++) {
        char c = identifier.charAt(i);
        // Both the escape character itself and the quote delimiter need escaping.
        if (c == '\\' || c == '\'') {
            quoted.append('\\');
        }
        quoted.append(c);
    }
    return quoted.append('\'').toString();
}
// The empty string escapes to a pair of single quotes with nothing between them.
@Test
public void testEscapeStringEmpty() {
    assertEquals("''", SingleStoreUtil.escapeString(""));
}
/**
 * Encodes an int as 4 bytes in little-endian ("low-high") order:
 * least-significant byte first.
 */
public static byte[] toLH(int n) {
    final byte[] encoded = new byte[4];
    for (int byteIndex = 0; byteIndex < 4; byteIndex++) {
        // Shift the target byte into the low 8 bits; the cast truncates the rest.
        encoded[byteIndex] = (byte) (n >>> (8 * byteIndex));
    }
    return encoded;
}
// Zero must encode to four zero bytes.
@Test
public void toLHInputZeroOutput4() {
    // Arrange
    final int n = 0;

    // Act
    final byte[] actual = RegisterSlaveCommandPacket.toLH(n);

    // Assert result
    Assert.assertArrayEquals(new byte[] { (byte) 0, (byte) 0, (byte) 0, (byte) 0 }, actual);
}
/**
 * Builds a WriteTxnMarkers request carrying a single ABORT marker for the
 * transaction described by {@code abortSpec}.
 *
 * @param brokerId        target broker (unused here; routing is handled by the caller)
 * @param topicPartitions partitions this batch covers; validated against the abort spec
 */
@Override
public WriteTxnMarkersRequest.Builder buildBatchedRequest(
    int brokerId,
    Set<TopicPartition> topicPartitions
) {
    validateTopicPartitions(topicPartitions);

    // Single topic entry pointing at the aborted transaction's partition.
    WriteTxnMarkersRequestData.WritableTxnMarkerTopic topicEntry = new WriteTxnMarkersRequestData.WritableTxnMarkerTopic()
        .setName(abortSpec.topicPartition().topic())
        .setPartitionIndexes(singletonList(abortSpec.topicPartition().partition()));

    // transactionResult=false marks the transaction as aborted.
    WriteTxnMarkersRequestData.WritableTxnMarker abortMarker = new WriteTxnMarkersRequestData.WritableTxnMarker()
        .setProducerId(abortSpec.producerId())
        .setProducerEpoch(abortSpec.producerEpoch())
        .setCoordinatorEpoch(abortSpec.coordinatorEpoch())
        .setTransactionResult(false);
    abortMarker.topics().add(topicEntry);

    WriteTxnMarkersRequestData requestData = new WriteTxnMarkersRequestData();
    requestData.markers().add(abortMarker);
    return new WriteTxnMarkersRequest.Builder(requestData);
}
// Builds a request for a valid partition set and checks every marker field
// round-trips from the abort spec.
@Test
public void testValidBuildRequestCall() {
    AbortTransactionHandler handler = new AbortTransactionHandler(abortSpec, logContext);
    WriteTxnMarkersRequest.Builder request = handler.buildBatchedRequest(1, singleton(topicPartition));
    assertEquals(1, request.data.markers().size());

    WriteTxnMarkersRequestData.WritableTxnMarker markerRequest = request.data.markers().get(0);
    assertEquals(abortSpec.producerId(), markerRequest.producerId());
    assertEquals(abortSpec.producerEpoch(), markerRequest.producerEpoch());
    assertEquals(abortSpec.coordinatorEpoch(), markerRequest.coordinatorEpoch());
    assertEquals(1, markerRequest.topics().size());

    WriteTxnMarkersRequestData.WritableTxnMarkerTopic topicRequest = markerRequest.topics().get(0);
    assertEquals(abortSpec.topicPartition().topic(), topicRequest.name());
    assertEquals(singletonList(abortSpec.topicPartition().partition()), topicRequest.partitionIndexes());
}
/**
 * Sets the topic-pattern auto-discovery period in minutes.
 * Delegates to the (period, TimeUnit) overload after validating the value.
 *
 * @param periodInMinutes discovery interval; must be >= 0
 * @return this builder, for chaining
 * @throws IllegalArgumentException if {@code periodInMinutes} is negative
 */
@Override
public ConsumerBuilder<T> patternAutoDiscoveryPeriod(int periodInMinutes) {
    checkArgument(periodInMinutes >= 0, "periodInMinutes needs to be >= 0");
    patternAutoDiscoveryPeriod(periodInMinutes, TimeUnit.MINUTES);
    return this;
}
// A negative discovery period must be rejected by the precondition check.
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenPatternAutoDiscoveryPeriodPeriodInMinutesIsNegative() {
    consumerBuilderImpl.patternAutoDiscoveryPeriod(-1);
}
/**
 * Evaluates a jq expression against a JSON string and returns the result as an
 * Integer, delegating to the generic jq helper with an int extractor.
 */
public static Integer jqInteger(String value, String expression) {
    return H2Functions.jq(value, expression, JsonNode::asInt);
}
// Extracts an int field at Integer.MAX_VALUE to exercise the full int range.
@Test
public void jqInteger() {
    Integer jqString = H2Functions.jqInteger("{\"a\": 2147483647}", ".a");
    assertThat(jqString, is(2147483647));
}
/**
 * Creates a new Hazelcast instance from the given configuration, loading the
 * default configuration when none is provided.
 */
public static HazelcastInstance newHazelcastInstance(Config config) {
    // A null config means "use the standard config resolution chain".
    final Config effectiveConfig = (config == null) ? Config.load() : config;
    return newHazelcastInstance(
            effectiveConfig,
            effectiveConfig.getInstanceName(),
            new DefaultNodeContext()
    );
}
// With no instance-name property set, a name is still generated and it is not
// the auto-generated "_hzInstance_" style name.
@Test
public void fixedNameGeneratedIfPropertyNotDefined() {
    Config config = new Config();
    hazelcastInstance = HazelcastInstanceFactory.newHazelcastInstance(config);
    String name = hazelcastInstance.getName();
    assertNotNull(name);
    assertNotContains(name, "_hzInstance_");
}
/**
 * Atomically sets the value to {@code newValue} if it currently equals
 * {@code expectedValue}; both values are compared in serialized byte form.
 *
 * @return an AtomicValue wrapping the outcome of the underlying CAS
 */
@Override
public AtomicValue<Long> compareAndSet(Long expectedValue, Long newValue) throws Exception {
    final byte[] expectedBytes = valueToBytes(expectedValue);
    final byte[] newBytes = valueToBytes(newValue);
    return new AtomicLong(value.compareAndSet(expectedBytes, newBytes));
}
// Exercises compareAndSet success, failure on stale expected value, and failure
// when a concurrent increment (forced via the overridden valueToBytes hook)
// bumps the ZooKeeper node version mid-operation.
@Test
public void testCompareAndSet() throws Exception {
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicBoolean doIncrement = new AtomicBoolean(false);
        DistributedAtomicLong dal = new DistributedAtomicLong(client, "/counter", new RetryOneTime(1)) {
            @Override
            public byte[] valueToBytes(Long newValue) {
                if (doIncrement.get()) {
                    DistributedAtomicLong inc = new DistributedAtomicLong(client, "/counter", new RetryOneTime(1));
                    try {
                        // this will force a bad version exception
                        inc.increment();
                    } catch (Exception e) {
                        throw new Error(e);
                    }
                }
                return super.valueToBytes(newValue);
            }
        };
        dal.forceSet(1L);
        assertTrue(dal.compareAndSet(1L, 5L).succeeded());
        assertFalse(dal.compareAndSet(1L, 5L).succeeded());
        doIncrement.set(true);
        assertFalse(dal.compareAndSet(5L, 10L).succeeded());
    } finally {
        client.close();
    }
}
/**
 * Unwraps a MultiMessage and delivers each contained message to the delegate
 * handler individually; non-multi messages pass straight through. A failure on
 * one item is logged and routed to {@code caught} without stopping the rest.
 */
@SuppressWarnings("unchecked")
@Override
public void received(Channel channel, Object message) throws RemotingException {
    // Fast path: plain messages are forwarded untouched.
    if (!(message instanceof MultiMessage)) {
        handler.received(channel, message);
        return;
    }
    for (Object item : (MultiMessage) message) {
        try {
            handler.received(channel, item);
        } catch (Throwable t) {
            logger.error(
                    INTERNAL_ERROR,
                    "unknown error in remoting module",
                    "",
                    "MultiMessageHandler received fail.",
                    t);
            try {
                // Give the delegate a chance to handle the failure itself.
                handler.caught(channel, t);
            } catch (Throwable t1) {
                logger.error(
                        INTERNAL_ERROR,
                        "unknown error in remoting module",
                        "",
                        "MultiMessageHandler caught fail.",
                        t1);
            }
        }
    }
}
// Covers all three paths of MultiMessageHandler.received: fan-out of a
// MultiMessage, pass-through of a plain object, and per-item error routing
// to caught() when the delegate throws.
@Test
void test() throws Exception {
    ChannelHandler handler = Mockito.mock(ChannelHandler.class);
    Channel channel = Mockito.mock(Channel.class);
    MultiMessageHandler multiMessageHandler = new MultiMessageHandler(handler);
    MultiMessage multiMessage = MultiMessage.createFromArray("test1", "test2");
    multiMessageHandler.received(channel, multiMessage);
    // verify
    ArgumentCaptor<Channel> channelArgumentCaptor = ArgumentCaptor.forClass(Channel.class);
    ArgumentCaptor<Object> objectArgumentCaptor = ArgumentCaptor.forClass(Object.class);
    Mockito.verify(handler, Mockito.times(2))
            .received(channelArgumentCaptor.capture(), objectArgumentCaptor.capture());
    Assertions.assertEquals(objectArgumentCaptor.getAllValues().get(0), "test1");
    Assertions.assertEquals(objectArgumentCaptor.getAllValues().get(1), "test2");
    Assertions.assertEquals(channelArgumentCaptor.getValue(), channel);

    Object obj = new Object();
    multiMessageHandler.received(channel, obj);
    // verify: plain objects are forwarded unchanged (third received() call overall)
    Mockito.verify(handler, Mockito.times(3))
            .received(channelArgumentCaptor.capture(), objectArgumentCaptor.capture());
    Assertions.assertEquals(objectArgumentCaptor.getValue(), obj);
    Assertions.assertEquals(channelArgumentCaptor.getValue(), channel);

    RuntimeException runtimeException = new RuntimeException();
    Mockito.doThrow(runtimeException).when(handler).received(Mockito.any(), Mockito.any());
    multiMessageHandler.received(channel, multiMessage);
    // verify: one caught() per failed item, both carrying the thrown exception
    ArgumentCaptor<Throwable> throwableArgumentCaptor = ArgumentCaptor.forClass(Throwable.class);
    Mockito.verify(handler, Mockito.times(2))
            .caught(channelArgumentCaptor.capture(), throwableArgumentCaptor.capture());
    Assertions.assertEquals(throwableArgumentCaptor.getAllValues().get(0), runtimeException);
    Assertions.assertEquals(throwableArgumentCaptor.getAllValues().get(1), runtimeException);
    Assertions.assertEquals(channelArgumentCaptor.getValue(), channel);
}
@Override public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) { for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) { Map<String, ?> sourceOffset = offsetEntry.getValue(); if (sourceOffset == null) { // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't // want to prevent users from being able to clean it up using the REST API continue; } Map<String, ?> sourcePartition = offsetEntry.getKey(); if (sourcePartition == null) { throw new ConnectException("Source partitions may not be null"); } MirrorUtils.validateSourcePartitionString(sourcePartition, CONSUMER_GROUP_ID_KEY); MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY); MirrorUtils.validateSourcePartitionPartition(sourcePartition); MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true); } // We don't actually use these offsets in the task class, so no additional effort is required beyond just validating // the format of the user-supplied offsets return true; }
// An offset map with an unrecognized key must fail validation.
@Test
public void testAlterOffsetsIncorrectOffsetKey() {
    MirrorCheckpointConnector connector = new MirrorCheckpointConnector();
    Map<Map<String, ?>, Map<String, ?>> offsets = Collections.singletonMap(
            sourcePartition("consumer-app-5", "t1", 2),
            Collections.singletonMap("unused_offset_key", 0)
    );
    assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets));
}
/**
 * Stores a measure by delegating to saveMeasure with the measure's input
 * component as the target.
 */
@Override
public void store(Measure newMeasure) {
    store(newMeasure.inputComponent(), (DefaultMeasure<?>) newMeasure);
}
// Storing an NCLOC measure on a file must serialize metric key and int value
// into the scanner report for that component.
@Test
public void should_save_file_measure() {
    DefaultInputFile file = new TestInputFileBuilder("foo", "src/Foo.php")
            .build();
    underTest.store(new DefaultMeasure()
            .on(file)
            .forMetric(CoreMetrics.NCLOC)
            .withValue(10));
    ScannerReport.Measure m = reportReader.readComponentMeasures(file.scannerId()).next();
    assertThat(m.getIntValue().getValue()).isEqualTo(10);
    assertThat(m.getMetricKey()).isEqualTo(CoreMetrics.NCLOC_KEY);
}
/**
 * Creates an encoder writing to the given stream, initializing the internal
 * buffer list plus the stacks used to track nested block contexts.
 *
 * @param out destination stream for the encoded output
 */
public BlockingDirectBinaryEncoder(OutputStream out) {
    super(out);
    this.buffers = new ArrayList<>();
    this.stashedBuffers = new ArrayDeque<>();
    this.blockItemCounts = new ArrayDeque<>();
}
// Round-trips arrays, maps, and their nested forms through the blocking direct
// binary encoder and decodes the bytes back via the generated record decoder.
@Test
void blockingDirectBinaryEncoder() throws IOException, NoSuchAlgorithmException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().blockingDirectBinaryEncoder(baos, null);

    // This is needed because there is no BlockingDirectBinaryEncoder
    // BinaryMessageWriter
    // available out of the box
    encoder.writeFixed(new byte[] { (byte) 0xC3, (byte) 0x01 });
    encoder.writeFixed(SchemaNormalization.parsingFingerprint("CRC-64-AVRO", TestRecordWithMapsAndArrays.SCHEMA$));

    // Array
    this.writeToArray(encoder, new int[] { 1, 2, 3, 4, 5 });
    // Map
    writeToMap(encoder, new long[] { 1L, 2L, 3L, 4L, 5L });
    // Nested Array
    encoder.writeArrayStart();
    encoder.setItemCount(2);
    this.writeToArray(encoder, new int[] { 1, 2 });
    this.writeToArray(encoder, new int[] { 3, 4, 5 });
    encoder.writeArrayEnd();
    // Nested Map
    encoder.writeMapStart();
    encoder.setItemCount(2);
    encoder.writeString("first");
    this.writeToMap(encoder, new long[] { 1L, 2L });
    encoder.writeString("second");
    this.writeToMap(encoder, new long[] { 3L, 4L, 5L });
    encoder.writeMapEnd();

    // Read back everything written above and verify field by field.
    encoder.flush();
    BinaryMessageDecoder<TestRecordWithMapsAndArrays> decoder = TestRecordWithMapsAndArrays.getDecoder();
    TestRecordWithMapsAndArrays r = decoder.decode(baos.toByteArray());

    assertThat(r.getArr(), is(Arrays.asList("1", "2", "3", "4", "5")));
    Map<String, Long> map = r.getMap();
    assertThat(map.size(), is(5));
    for (long i = 1; i <= 5; i++) {
        assertThat(map.get(Long.toString(i)), is(i));
    }
    assertThat(r.getNestedArr(), is(Arrays.asList(Arrays.asList("1", "2"), Arrays.asList("3", "4", "5"))));
    Map<String, Map<String, Long>> nestedMap = r.getNestedMap();
    assertThat(nestedMap.size(), is(2));
    assertThat(nestedMap.get("first").size(), is(2));
    assertThat(nestedMap.get("first").get("1"), is(1L));
    assertThat(nestedMap.get("first").get("2"), is(2L));
    assertThat(nestedMap.get("second").size(), is(3));
    assertThat(nestedMap.get("second").get("3"), is(3L));
    assertThat(nestedMap.get("second").get("4"), is(4L));
    assertThat(nestedMap.get("second").get("5"), is(5L));
}
/**
 * Factory for MySQL front-end command executors: maps each supported command
 * packet type to its executor, falling back to an "unsupported" executor.
 *
 * @param commandPacketType the MySQL protocol command type
 * @param commandPacket     the parsed command packet (cast per type below)
 * @param connectionSession the session the command executes in
 * @throws SQLException if an executor's construction fails
 */
@SuppressWarnings("DataFlowIssue")
public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket,
                                          final ConnectionSession connectionSession) throws SQLException {
    // Log the SQL text only when the packet actually carries one.
    if (commandPacket instanceof SQLReceivedPacket) {
        log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
    } else {
        log.debug("Execute packet type: {}", commandPacketType);
    }
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitExecutor();
        case COM_INIT_DB:
            return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession);
        case COM_QUERY:
            return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession);
        case COM_PING:
            return new MySQLComPingExecutor(connectionSession);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession);
        case COM_STMT_EXECUTE:
            return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession);
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession);
        case COM_STMT_RESET:
            return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession);
        case COM_STMT_CLOSE:
            return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession);
        case COM_SET_OPTION:
            return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession);
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionExecutor(connectionSession);
        default:
            // Unknown/unimplemented commands get a dedicated error-reporting executor.
            return new MySQLUnsupportedCommandExecutor(commandPacketType);
    }
}
// COM_QUIT must map to MySQLComQuitExecutor.
@Test
void assertNewInstanceWithComQuit() throws SQLException {
    assertThat(MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_QUIT, mock(CommandPacket.class), connectionSession), instanceOf(MySQLComQuitExecutor.class));
}
/**
 * Splits the input on the separator while respecting quoted sections.
 * Convenience overload: trims tokens (true) and does not preserve quotes (false).
 */
public static String[] splitSafeQuote(String input, char separator) {
    return splitSafeQuote(input, separator, true, false);
}
// Splitting with trim enabled must strip surrounding whitespace from each token,
// both with and without extra padding around the input.
@Test
public void testSplitBeanParametersTrim() throws Exception {
    String[] arr = StringQuoteHelper.splitSafeQuote("String.class ${body}, String.class Mars", ',', true, true);
    Assertions.assertEquals(2, arr.length);
    Assertions.assertEquals("String.class ${body}", arr[0]);
    Assertions.assertEquals("String.class Mars", arr[1]);

    arr = StringQuoteHelper.splitSafeQuote("   String.class ${body} ,    String.class Mars   ", ',', true, true);
    Assertions.assertEquals(2, arr.length);
    Assertions.assertEquals("String.class ${body}", arr[0]);
    Assertions.assertEquals("String.class Mars", arr[1]);
}
/**
 * Creates the admin executor matching the statement type, or empty when the
 * statement is not an admin SHOW statement handled here.
 */
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
    final SQLStatement statement = sqlStatementContext.getSqlStatement();
    if (statement instanceof ShowFunctionStatusStatement) {
        return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) statement));
    }
    if (statement instanceof ShowProcedureStatusStatement) {
        return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) statement));
    }
    if (statement instanceof ShowTablesStatement) {
        // SHOW TABLES additionally needs the dialect to format its output.
        return Optional.of(new ShowTablesExecutor((ShowTablesStatement) statement, sqlStatementContext.getDatabaseType()));
    }
    return Optional.empty();
}
// A bare SELECT of @@session.transaction_read_only (no FROM clause) must be
// handled by the system-variable query executor.
@Test
void assertCreateWithSelectStatementForTransactionReadOnly() {
    initProxyContext(Collections.emptyMap());
    MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class);
    when(selectStatement.getFrom()).thenReturn(Optional.empty());
    ProjectionsSegment projectionsSegment = mock(ProjectionsSegment.class);
    VariableSegment variableSegment = new VariableSegment(0, 0, "transaction_read_only");
    variableSegment.setScope("SESSION");
    when(projectionsSegment.getProjections()).thenReturn(Collections.singletonList(new ExpressionProjectionSegment(0, 10, "@@session.transaction_read_only", variableSegment)));
    when(selectStatement.getProjections()).thenReturn(projectionsSegment);
    when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement);
    Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "select @@session.transaction_read_only", "", Collections.emptyList());
    assertTrue(actual.isPresent());
    assertThat(actual.get(), instanceOf(MySQLSystemVariableQueryExecutor.class));
}
/**
 * Returns a view of this cache scoped to the given computation id.
 */
public ForComputation forComputation(String computation) {
    return new ForComputation(computation);
}
// Verifies that cache entries are isolated per (computation, key): writes under
// one key never leak into caches for other keys or other computations, and
// persisted entries survive re-obtaining the key cache at a later work token.
@Test
public void testMultipleKeys() throws Exception {
    TestStateTag tag = new TestStateTag("tag1");

    WindmillStateCache.ForKeyAndFamily keyCache1 = cache
            .forComputation("comp1")
            .forKey(computationKey("comp1", "key1", SHARDING_KEY), 0L, 0L)
            .forFamily(STATE_FAMILY);
    WindmillStateCache.ForKeyAndFamily keyCache2 = cache
            .forComputation("comp1")
            .forKey(computationKey("comp1", "key2", SHARDING_KEY), 0L, 10L)
            .forFamily(STATE_FAMILY);
    WindmillStateCache.ForKeyAndFamily keyCache3 = cache
            .forComputation("comp2")
            .forKey(computationKey("comp2", "key1", SHARDING_KEY), 0L, 0L)
            .forFamily(STATE_FAMILY);

    TestState state1 = new TestState("g1");
    keyCache1.put(StateNamespaces.global(), tag, state1, 2);
    assertEquals(Optional.of(state1), keyCache1.get(StateNamespaces.global(), tag));
    keyCache1.persist();

    // Re-acquire key1's cache at a newer work token; the persisted entry is visible.
    keyCache1 = cache
            .forComputation("comp1")
            .forKey(computationKey("comp1", "key1", SHARDING_KEY), 0L, 1L)
            .forFamily(STATE_FAMILY);
    assertEquals(Optional.of(state1), keyCache1.get(StateNamespaces.global(), tag));
    assertEquals(Optional.empty(), keyCache2.get(StateNamespaces.global(), tag));
    assertEquals(Optional.empty(), keyCache3.get(StateNamespaces.global(), tag));

    TestState state2 = new TestState("g2");
    keyCache2.put(StateNamespaces.global(), tag, state2, 2);
    keyCache2.persist();
    assertEquals(Optional.of(state2), keyCache2.get(StateNamespaces.global(), tag));

    keyCache2 = cache
            .forComputation("comp1")
            .forKey(computationKey("comp1", "key2", SHARDING_KEY), 0L, 20L)
            .forFamily(STATE_FAMILY);
    assertEquals(Optional.of(state2), keyCache2.get(StateNamespaces.global(), tag));
    assertEquals(Optional.of(state1), keyCache1.get(StateNamespaces.global(), tag));
    assertEquals(Optional.empty(), keyCache3.get(StateNamespaces.global(), tag));
}
public static List<KiePMMLFieldOperatorValue> getConstraintEntriesFromXOrCompoundPredicate(final CompoundPredicate compoundPredicate, final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap) { if (!CompoundPredicate.BooleanOperator.XOR.equals(compoundPredicate.getBooleanOperator())) { throw new KiePMMLException(String.format("getConstraintEntriesFromXOrCompoundPredicate invoked with %s CompoundPredicate", compoundPredicate.getBooleanOperator())); } // Managing only SimplePredicates for the moment being final List<Predicate> simplePredicates = compoundPredicate.getPredicates().stream().filter(predicate -> predicate instanceof SimplePredicate).collect(Collectors.toList()); if (simplePredicates.size() < 2) { throw new KiePMMLException("At least two elements expected for XOR operations"); } if (simplePredicates.size() > 2) { // Not managed yet throw new KiePMMLException("More then two elements not managed, yet, for XOR operations"); } return getXORConstraintEntryFromSimplePredicates(simplePredicates, fieldTypeMap); }
// Two SimplePredicates under an XOR compound must yield one constraint entry per operand.
@Test
void getConstraintEntriesFromXOrCompoundPredicate() {
    CompoundPredicate compoundPredicate = new CompoundPredicate();
    compoundPredicate.setBooleanOperator(CompoundPredicate.BooleanOperator.XOR);
    List<Predicate> predicates = IntStream.range(0, 2).mapToObj(index -> simplePredicates.get(index)).collect(Collectors.toList());
    compoundPredicate.getPredicates().addAll(predicates);
    List<KiePMMLFieldOperatorValue> retrieved = KiePMMLASTFactoryUtils
            .getConstraintEntriesFromXOrCompoundPredicate(compoundPredicate, fieldTypeMap);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved).hasSameSizeAs(predicates);
    commonVerifyKiePMMLFieldOperatorValueList(retrieved, null);
}
@Override public ModelMBean assemble(Object obj, ObjectName name) throws JMException { ModelMBeanInfo mbi = null; // use the default provided mbean which has been annotated with JMX annotations LOGGER.trace("Assembling MBeanInfo for: {} from @ManagedResource object: {}", name, obj); mbi = assembler.getMBeanInfo(obj, null, name.toString()); if (mbi == null) { return null; } RequiredModelMBean mbean = new RequiredModelMBean(mbi); try { mbean.setManagedResource(obj, "ObjectReference"); } catch (InvalidTargetObjectTypeException e) { throw new JMException(e.getMessage()); } // Allows the managed object to send notifications if (obj instanceof NotificationSenderAware) { ((NotificationSenderAware) obj).setNotificationSender(new NotificationSenderAdapter(mbean)); } return mbean; }
// Assembling an annotated test MBean must expose both attributes (in either
// order) and all three operations.
@Test
public void testHappyPath() throws MalformedObjectNameException, JMException {
    TestMbean testMbean = new TestMbean();
    ModelMBean mbean = defaultManagementMBeanAssembler.assemble(testMbean, new ObjectName("org.flowable.jmx.Mbeans:type=something"));
    assertThat(mbean).isNotNull();
    assertThat(mbean.getMBeanInfo()).isNotNull();
    assertThat(mbean.getMBeanInfo().getAttributes()).isNotNull();
    MBeanAttributeInfo[] attributes = mbean.getMBeanInfo().getAttributes();
    assertThat(attributes).hasSize(2);
    // Attribute order is unspecified, so accept either permutation.
    assertThat(("TestAttributeString".equals(attributes[0].getName()) && "TestAttributeBoolean".equals(attributes[1].getName())
            || ("TestAttributeString".equals(attributes[1].getName()) && "TestAttributeBoolean".equals(attributes[0]
            .getName())))).isTrue();
    assertThat(mbean.getMBeanInfo().getOperations()).isNotNull();
    MBeanOperationInfo[] operations = mbean.getMBeanInfo().getOperations();
    assertThat(operations).hasSize(3);
}
/**
 * Renders the alert e-mail body: uses the "body" template from the plugin
 * configuration when present, otherwise the built-in default template.
 */
@VisibleForTesting
String buildBody(Stream stream, AlertCondition.CheckResult checkResult, List<Message> backlog) {
    // Resolve the template once; null means "not configured".
    final String configuredTemplate = (pluginConfig == null) ? null : pluginConfig.getString("body");
    final String template = (configuredTemplate == null) ? bodyTemplate : configuredTemplate;
    return this.templateEngine.transform(template, getModel(stream, checkResult, backlog));
}
// With an empty backlog, the default body template must render the
// "<No backlog>" placeholder and omit the backlog section header.
@Test
public void defaultBodyTemplateDoesNotShowBacklogIfBacklogIsEmpty() throws Exception {
    FormattedEmailAlertSender emailAlertSender = new FormattedEmailAlertSender(new EmailConfiguration(), mockNotificationService, nodeId, templateEngine, emailFactory);
    Stream stream = mock(Stream.class);
    when(stream.getId()).thenReturn("123456");
    when(stream.getTitle()).thenReturn("Stream Title");
    AlertCondition alertCondition = mock(AlertCondition.class);
    AlertCondition.CheckResult checkResult = mock(AbstractAlertCondition.CheckResult.class);
    when(checkResult.getTriggeredAt()).thenReturn(new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC));
    when(checkResult.getTriggeredCondition()).thenReturn(alertCondition);
    String body = emailAlertSender.buildBody(stream, checkResult, Collections.<Message>emptyList());
    assertThat(body)
            .contains("<No backlog>\n")
            .doesNotContain("Last messages accounting for this alert:\n");
}
/**
 * Delegates input-stream retrieval for the given column to the wrapped merged result.
 */
@Override
public InputStream getInputStream(final int columnIndex, final String type) throws SQLException {
    return mergedResult.getInputStream(columnIndex, type);
}
// getInputStream must return exactly the stream provided by the wrapped merged result.
@Test
void assertGetInputStream() throws SQLException {
    InputStream inputStream = mock(InputStream.class);
    when(mergedResult.getInputStream(1, "asc")).thenReturn(inputStream);
    assertThat(new MaskMergedResult(mock(MaskRule.class), mock(SelectStatementContext.class), mergedResult).getInputStream(1, "asc"), is(inputStream));
}
/**
 * Removes all entries by clearing every section of the segmented map.
 */
public void clear() {
    for (int i = 0; i < sections.length; i++) {
        sections[i].clear();
    }
}
// With auto-shrink enabled, clearing the map must shrink capacity back to the
// initial size after growth.
@Test
public void testClear() {
    ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
            .expectedItems(2)
            .concurrencyLevel(1)
            .autoShrink(true)
            .mapIdleFactor(0.25f)
            .build();
    assertTrue(map.capacity() == 4);

    assertNull(map.put(1, "v1"));
    assertNull(map.put(2, "v2"));
    assertNull(map.put(3, "v3"));
    // Inserting past the expected size triggers a capacity expansion.
    assertTrue(map.capacity() == 8);

    map.clear();
    assertTrue(map.capacity() == 4);
}
@Override public void initialize(String name, Map<String, String> properties) { String uri = properties.get(CatalogProperties.URI); Preconditions.checkArgument(null != uri, "JDBC connection URI is required"); try { // We'll ensure the expected JDBC driver implementation class is initialized through // reflection regardless of which classloader ends up using this JdbcSnowflakeClient, but // we'll only warn if the expected driver fails to load, since users may use repackaged or // custom JDBC drivers for Snowflake communication. Class.forName(JdbcSnowflakeClient.EXPECTED_JDBC_IMPL); } catch (ClassNotFoundException cnfe) { LOG.warn( "Failed to load expected JDBC SnowflakeDriver - if queries fail by failing" + " to find a suitable driver for jdbc:snowflake:// URIs, you must add the Snowflake " + " JDBC driver to your jars/packages", cnfe); } // The uniqueAppIdentifier should be less than 50 characters, so trimming the guid. String uniqueId = UUID.randomUUID().toString().replace("-", "").substring(0, UNIQUE_ID_LENGTH); String uniqueAppIdentifier = APP_IDENTIFIER + "_" + uniqueId; String userAgentSuffix = IcebergBuild.fullVersion() + " " + uniqueAppIdentifier; // Populate application identifier in jdbc client properties.put(JdbcCatalog.PROPERTY_PREFIX + JDBC_APPLICATION_PROPERTY, uniqueAppIdentifier); // Adds application identifier to the user agent header of the JDBC requests. properties.put(JdbcCatalog.PROPERTY_PREFIX + JDBC_USER_AGENT_SUFFIX_PROPERTY, userAgentSuffix); JdbcClientPool connectionPool = new JdbcClientPool(uri, properties); initialize(name, new JdbcSnowflakeClient(connectionPool), new FileIOFactory(), properties); }
// The internal initialize overload must reject a null snowflakeClient.
@Test
public void testInitializeNullClient() {
    assertThatExceptionOfType(IllegalArgumentException.class)
            .isThrownBy(
                    () -> catalog.initialize(TEST_CATALOG_NAME, null, fakeFileIOFactory, properties))
            .withMessageContaining("snowflakeClient must be non-null");
}
/**
 * Returns the {@code index}-th chunk of {@code message}, each chunk at most
 * {@code maxLength} bytes, or null when the index is past the end of the message.
 */
@Nullable
@Override
public byte[] chunk(@NonNull final byte[] message,
                    @IntRange(from = 0) final int index,
                    @IntRange(from = 20) final int maxLength) {
    final int start = index * maxLength;
    final int remaining = message.length - start;
    // Nothing left at (or beyond) this index.
    if (remaining <= 0) {
        return null;
    }
    final int size = Math.min(maxLength, remaining);
    final byte[] chunk = new byte[size];
    System.arraycopy(message, start, chunk, 0, size);
    return chunk;
}
// An index far past the end of the message must yield null (no chunk).
@Test
public void chunk_end() {
    final int MTU = 23;
    final DefaultMtuSplitter splitter = new DefaultMtuSplitter();
    final byte[] result = splitter.chunk(text.getBytes(), 200, MTU - 3);
    assertNull(result);
}
/**
 * Collects per-partition data info (latest file modification time and file
 * count) for this table. When {@code partitionLimit} is set (>= 0) only the
 * most recent {@code partitionLimit} partitions are inspected.
 *
 * @return partition name -> data info; empty Optional when the partition has
 *         no file listing
 */
@Override
public Map<String, Optional<HivePartitionDataInfo>> getPartitionDataInfos() {
    Map<String, Optional<HivePartitionDataInfo>> partitionDataInfos = Maps.newHashMap();
    List<String> partitionNameToFetch = partitionNames;
    if (partitionLimit >= 0 && partitionLimit < partitionNames.size()) {
        // Keep only the tail of the list (most recent partitions by convention).
        partitionNameToFetch = partitionNames.subList(partitionNames.size() - partitionLimit, partitionNames.size());
    }
    // BUG FIX: fetch remote files for exactly the partitions we iterate below.
    // Previously the full partitionNames list was passed here while results were
    // indexed by partitionNameToFetch, so when partitionLimit truncated the list
    // each partition name was paired with another partition's file metadata.
    GetRemoteFilesParams params =
            GetRemoteFilesParams.newBuilder().setPartitionNames(partitionNameToFetch).setCheckPartitionExistence(false).build();
    List<RemoteFileInfo> remoteFileInfos = GlobalStateMgr.getCurrentState().getMetadataMgr().getRemoteFiles(table, params);
    for (int i = 0; i < partitionNameToFetch.size(); i++) {
        RemoteFileInfo remoteFileInfo = remoteFileInfos.get(i);
        List<RemoteFileDesc> remoteFileDescs = remoteFileInfo.getFiles();
        if (remoteFileDescs != null) {
            long lastFileModifiedTime = Long.MIN_VALUE;
            int fileNumber = remoteFileDescs.size();
            // Track the newest file's modification time as the partition's timestamp.
            Optional<RemoteFileDesc> maxLastModifiedTimeFile = remoteFileDescs.stream()
                    .max(Comparator.comparingLong(RemoteFileDesc::getModificationTime));
            if (maxLastModifiedTimeFile.isPresent()) {
                lastFileModifiedTime = maxLastModifiedTimeFile.get().getModificationTime();
            }
            HivePartitionDataInfo hivePartitionDataInfo = new HivePartitionDataInfo(lastFileModifiedTime, fileNumber);
            partitionDataInfos.put(partitionNameToFetch.get(i), Optional.of(hivePartitionDataInfo));
        } else {
            // No file listing available for this partition.
            partitionDataInfos.put(partitionNameToFetch.get(i), Optional.empty());
        }
    }
    return partitionDataInfos;
}
// Without a partition limit (-1), every partition must get a data-info entry.
@Test
public void testGetPartitionDataInfos(@Mocked MetadataMgr metadataMgr) {
    List<RemoteFileInfo> remoteFileInfos = createRemoteFileInfos(4);
    new Expectations() {
        {
            GlobalStateMgr.getCurrentState().getMetadataMgr();
            result = metadataMgr;
            minTimes = 0;

            metadataMgr.getRemoteFiles((Table) any, (GetRemoteFilesParams) any);
            result = remoteFileInfos;
            minTimes = 0;
        }
    };
    String location = "oss://bucket_name/lineorder_part";
    HiveTable hiveTable = createHiveTable(location);
    List<String> partitionNames = Lists.newArrayList(
            "date=20240501", "date=20240502", "date=20240503", "date=20240504");
    TableUpdateArbitrator.UpdateContext updateContext = new TableUpdateArbitrator.UpdateContext(hiveTable, -1, partitionNames);
    TableUpdateArbitrator arbitrator = TableUpdateArbitrator.create(updateContext);
    Assert.assertTrue(arbitrator instanceof ObjectBasedUpdateArbitrator);
    Map<String, Optional<HivePartitionDataInfo>> hivePartitionDataInfo = arbitrator.getPartitionDataInfos();
    Assert.assertEquals(4, hivePartitionDataInfo.size());
    Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240501"));
    Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240502"));
    Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240503"));
    Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240504"));
}
public void writeProcessInformations(List<ProcessInformations> processInformations) throws IOException { try { document.open(); addParagraph(getString("Processus"), "processes.png"); new PdfProcessInformationsReport(processInformations, document).toPdf(); } catch (final DocumentException e) { throw createIOException(e); } document.close(); }
@Test public void testWriteProcessInformations() throws IOException { final ByteArrayOutputStream output = new ByteArrayOutputStream(); PdfOtherReport pdfOtherReport = new PdfOtherReport(TEST_APP, output); pdfOtherReport.writeProcessInformations(ProcessInformations.buildProcessInformations( getClass().getResourceAsStream("/tasklist.txt"), true, false)); assertNotEmptyAndClear(output); pdfOtherReport = new PdfOtherReport(TEST_APP, output); pdfOtherReport.writeProcessInformations(ProcessInformations .buildProcessInformations(getClass().getResourceAsStream("/ps.txt"), false, false)); assertNotEmptyAndClear(output); pdfOtherReport = new PdfOtherReport(TEST_APP, output); pdfOtherReport.writeProcessInformations( Collections.singletonMap("localhost", ProcessInformations.buildProcessInformations( getClass().getResourceAsStream("/ps.txt"), false, false))); assertNotEmptyAndClear(output); }
@Override public boolean isSatisfied(int index, TradingRecord tradingRecord) { ZonedDateTime dateTime = timeIndicator.getValue(index); LocalTime localTime = dateTime.toLocalTime(); final boolean satisfied = timeRanges.stream() .anyMatch( timeRange -> !localTime.isBefore(timeRange.getFrom()) && !localTime.isAfter(timeRange.getTo())); traceIsSatisfied(index, satisfied); return satisfied; }
@Test public void isSatisfiedForBuy() { final DateTimeFormatter dtf = DateTimeFormatter.ISO_ZONED_DATE_TIME; DateTimeIndicator dateTimeIndicator = new DateTimeIndicator( new MockBarSeries(numFunction, new double[] { 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100 }, new ZonedDateTime[] { ZonedDateTime.parse("2019-09-17T00:00:00-00:00", dtf), // Index=0 ZonedDateTime.parse("2019-09-17T05:00:00-00:00", dtf), // 1 ZonedDateTime.parse("2019-09-17T07:00:00-00:00", dtf), // 2 ZonedDateTime.parse("2019-09-17T08:00:00-00:00", dtf), // 3 ZonedDateTime.parse("2019-09-17T15:00:00-00:00", dtf), // 4 ZonedDateTime.parse("2019-09-17T15:05:00-00:00", dtf), // 5 ZonedDateTime.parse("2019-09-17T16:59:00-00:00", dtf), // 6 ZonedDateTime.parse("2019-09-17T17:05:00-00:00", dtf), // 7 ZonedDateTime.parse("2019-09-17T23:00:00-00:00", dtf), // 8 ZonedDateTime.parse("2019-09-17T23:30:00-00:00", dtf), // 9 ZonedDateTime.parse("2019-09-17T23:35:00-00:00", dtf) // 10 }), Bar::getBeginTime); TimeRangeRule rule = new TimeRangeRule( Arrays.asList(new TimeRangeRule.TimeRange(LocalTime.of(0, 0), LocalTime.of(4, 0)), new TimeRangeRule.TimeRange(LocalTime.of(6, 0), LocalTime.of(7, 0)), new TimeRangeRule.TimeRange(LocalTime.of(12, 0), LocalTime.of(15, 0)), new TimeRangeRule.TimeRange(LocalTime.of(17, 0), LocalTime.of(21, 0)), new TimeRangeRule.TimeRange(LocalTime.of(22, 0), LocalTime.of(23, 30))), dateTimeIndicator); assertTrue(rule.isSatisfied(0, null)); assertFalse(rule.isSatisfied(1, null)); assertTrue(rule.isSatisfied(2, null)); assertFalse(rule.isSatisfied(3, null)); assertTrue(rule.isSatisfied(4, null)); assertFalse(rule.isSatisfied(5, null)); assertFalse(rule.isSatisfied(6, null)); assertTrue(rule.isSatisfied(7, null)); assertTrue(rule.isSatisfied(8, null)); assertTrue(rule.isSatisfied(9, null)); assertFalse(rule.isSatisfied(10, null)); }
public void setFilePaths(String... filePaths) { Path[] paths = new Path[filePaths.length]; for (int i = 0; i < paths.length; i++) { paths[i] = new Path(filePaths[i]); } setFilePaths(paths); }
@Test void testMultiPathSetOnSinglePathIF2() { final DummyFileInputFormat format = new DummyFileInputFormat(); final String myPath = "/an/imaginary/path"; final String myPath2 = "/an/imaginary/path2"; // format.setFilePaths(new Path(myPath), new Path(myPath2)); assertThatThrownBy(() -> format.setFilePaths(new Path(myPath), new Path(myPath2))) .isInstanceOf(UnsupportedOperationException.class); }
public OffsetRange[] getNextOffsetRanges(Option<String> lastCheckpointStr, long sourceLimit, HoodieIngestionMetrics metrics) { // Come up with final set of OffsetRanges to read (account for new partitions, limit number of events) long maxEventsToReadFromKafka = getLongWithAltKeys(props, KafkaSourceConfig.MAX_EVENTS_FROM_KAFKA_SOURCE); long numEvents; if (sourceLimit == Long.MAX_VALUE) { numEvents = maxEventsToReadFromKafka; LOG.info("SourceLimit not configured, set numEvents to default value : {}", maxEventsToReadFromKafka); } else { numEvents = sourceLimit; } long minPartitions = getLongWithAltKeys(props, KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS); LOG.info("getNextOffsetRanges set config {} to {}", KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), minPartitions); return getNextOffsetRanges(lastCheckpointStr, numEvents, minPartitions, metrics); }
@Test public void testGetNextOffsetRangesFromSingleOffsetCheckpointNotApplicable() { testUtils.createTopic(testTopicName, 2); KafkaOffsetGen kafkaOffsetGen = new KafkaOffsetGen(getConsumerConfigs("latest", KAFKA_CHECKPOINT_TYPE_SINGLE_OFFSET)); // incorrect number of partitions => exception (number of partitions is more than 1) String lastCheckpointString = "250"; Exception exception = assertThrows(HoodieException.class, () -> kafkaOffsetGen.getNextOffsetRanges(Option.of(lastCheckpointString), 500, metrics)); assertTrue(exception.getMessage().startsWith("Kafka topic " + testTopicName + " has 2 partitions (more than 1)")); }
public static <T> AsSingleton<T> asSingleton() { return new AsSingleton<>(); }
@Test @Category(ValidatesRunner.class) public void testDiscardingNonSingletonSideInput() throws Exception { PCollection<Integer> oneTwoThree = pipeline.apply(Create.of(1, 2, 3)); final PCollectionView<Integer> view = oneTwoThree .apply(Window.<Integer>configure().discardingFiredPanes()) .apply(View.asSingleton()); oneTwoThree.apply( "OutputSideInputs", ParDo.of( new DoFn<Integer, Integer>() { @ProcessElement public void processElement(ProcessContext c) { c.output(c.sideInput(view)); } }) .withSideInputs(view)); // As long as we get an error, be flexible with how a runner surfaces it thrown.expect(Exception.class); pipeline.run(); }
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic, ConsumerPosition consumerPosition, @Nullable String query, MessageFilterTypeDTO filterQueryType, @Nullable Integer pageSize, SeekDirectionDTO seekDirection, @Nullable String keySerde, @Nullable String valueSerde) { return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query, filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde)); }
@Test void loadMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String")) .expectError(TopicNotFoundException.class) .verify(); }
public boolean ifHasZero(int... nums) { LOGGER.info("Arithmetic check zero {}", VERSION); return !source.ifNonZero(nums); }
@Test void testIfHasZero() { assertTrue(arithmetic.ifHasZero(-1, 0, 1)); }
public byte[] getNextTag() { byte[] tagBytes = null; if (tagPool != null) { tagBytes = tagPool.pollFirst(); } if (tagBytes == null) { long tag = nextTagId++; int size = encodingSize(tag); tagBytes = new byte[size]; for (int i = 0; i < size; ++i) { tagBytes[size - 1 - i] = (byte) (tag >>> (i * 8)); } } return tagBytes; }
@Test public void testTagValueMatchesParsedArray() throws IOException { AmqpTransferTagGenerator tagGen = new AmqpTransferTagGenerator(false); for (int i = 0; i < Short.MAX_VALUE; ++i) { byte[] tag = tagGen.getNextTag(); ByteArrayInputStream bais = new ByteArrayInputStream(tag); DataInputStream dis = new DataInputStream(bais); if (i < 256) { assertEquals(1, tag.length); assertEquals((byte) i, dis.readByte()); } else { assertEquals(2, tag.length); assertEquals(i, dis.readShort()); } } }
@Override public long getMin() { if (values.length == 0) { return 0; } return values[0]; }
@Test public void calculatesAMinOfZeroForAnEmptySnapshot() throws Exception { final Snapshot emptySnapshot = new UniformSnapshot(new long[]{ }); assertThat(emptySnapshot.getMin()) .isZero(); }
public List<PluginRoleConfig> getPluginRoleConfigs() { return filterRolesBy(PluginRoleConfig.class); }
@Test public void getPluginRoleConfigsShouldReturnOnlyPluginRoles() { Role admin = new RoleConfig(new CaseInsensitiveString("admin")); Role view = new RoleConfig(new CaseInsensitiveString("view")); Role blackbird = new PluginRoleConfig("blackbird", "foo"); Role spacetiger = new PluginRoleConfig("spacetiger", "foo"); RolesConfig rolesConfig = new RolesConfig(admin, blackbird, view, spacetiger); List<PluginRoleConfig> roles = rolesConfig.getPluginRoleConfigs(); assertThat(roles, hasSize(2)); assertThat(roles, contains(blackbird, spacetiger)); }
@InvokeOnHeader(Web3jConstants.ETH_SEND_TRANSACTION) void ethSendTransaction(Message message) throws IOException { String fromAddress = message.getHeader(Web3jConstants.FROM_ADDRESS, configuration::getFromAddress, String.class); String toAddress = message.getHeader(Web3jConstants.TO_ADDRESS, configuration::getToAddress, String.class); BigInteger nonce = message.getHeader(Web3jConstants.NONCE, configuration::getNonce, BigInteger.class); BigInteger gasPrice = message.getHeader(Web3jConstants.GAS_PRICE, configuration::getGasPrice, BigInteger.class); BigInteger gasLimit = message.getHeader(Web3jConstants.GAS_LIMIT, configuration::getGasLimit, BigInteger.class); BigInteger value = message.getHeader(Web3jConstants.VALUE, configuration::getValue, BigInteger.class); String data = message.getHeader(Web3jConstants.DATA, configuration::getData, String.class); org.web3j.protocol.core.methods.request.Transaction transaction = new org.web3j.protocol.core.methods.request.Transaction( fromAddress, nonce, gasPrice, gasLimit, toAddress, value, data); Request<?, EthSendTransaction> request = web3j.ethSendTransaction(transaction); setRequestId(message, request); EthSendTransaction response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getTransactionHash()); } }
@Test public void ethSendTransactionTest() throws Exception { EthSendTransaction response = Mockito.mock(EthSendTransaction.class); Mockito.when(mockWeb3j.ethSendTransaction(any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getTransactionHash()).thenReturn("test"); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_SEND_TRANSACTION); template.send(exchange); String body = exchange.getIn().getBody(String.class); assertEquals("test", body); }
public Object evaluate(final ProcessingDTO processingDTO, final List<Object> paramValues) { final List<KiePMMLNameValue> kiePMMLNameValues = new ArrayList<>(); if (parameterFields != null) { if (paramValues == null || paramValues.size() < parameterFields.size()) { throw new IllegalArgumentException("Expected at least " + parameterFields.size() + " arguments for " + name + " DefineFunction"); } for (int i = 0; i < parameterFields.size(); i++) { kiePMMLNameValues.add(new KiePMMLNameValue(parameterFields.get(i).getName(), paramValues.get(i))); } } for (KiePMMLNameValue kiePMMLNameValue : kiePMMLNameValues) { processingDTO.addKiePMMLNameValue(kiePMMLNameValue); } return commonEvaluate(kiePMMLExpression.evaluate(processingDTO), dataType); }
@Test void evaluateEmptyParamValues() { assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> { final KiePMMLParameterField parameterField1 = KiePMMLParameterField.builder(PARAM_1, Collections.emptyList ()).build(); final KiePMMLParameterField parameterField2 = KiePMMLParameterField.builder(PARAM_2, Collections.emptyList ()).build(); final KiePMMLDefineFunction defineFunction = new KiePMMLDefineFunction(CUSTOM_FUNCTION, Collections .emptyList(), null, OP_TYPE.CONTINUOUS, Arrays.asList(parameterField1, parameterField2), null); ProcessingDTO processingDTO = getProcessingDTO(Collections.emptyList()); defineFunction.evaluate(processingDTO, Collections.emptyList()); }); }
public static boolean equal(Comparable lhs, Comparable rhs) { assert lhs != null; if (rhs == null) { return false; } if (lhs.getClass() == rhs.getClass()) { return lhs.equals(rhs); } if (lhs instanceof Number lhsNumber && rhs instanceof Number rhsNumber) { return Numbers.equal(lhsNumber, rhsNumber); } return lhs.equals(rhs); }
@Test public void testEqual() { assertFalse(equal(1, null)); assertFalse(equal(1, 2)); assertFalse(equal(1, 1.1)); assertFalse(equal("foo", "bar")); assertFalse(equal("foo", 1)); assertFalse(equal(1.0, "foo")); assertFalse(equal(1.0, "1.0")); assertTrue(equal(1, 1)); assertTrue(equal("foo", "foo")); }
public StepExpression createExpression(StepDefinition stepDefinition) { List<ParameterInfo> parameterInfos = stepDefinition.parameterInfos(); if (parameterInfos.isEmpty()) { return createExpression( stepDefinition.getPattern(), stepDefinitionDoesNotTakeAnyParameter(stepDefinition), false); } ParameterInfo parameterInfo = parameterInfos.get(parameterInfos.size() - 1); return createExpression( stepDefinition.getPattern(), parameterInfo.getTypeResolver()::resolve, parameterInfo.isTransposed()); }
@Test void creates_a_step_expression() { StepDefinition stepDefinition = new StubStepDefinition("Given a step"); StepExpression expression = stepExpressionFactory.createExpression(stepDefinition); assertThat(expression.getSource(), is("Given a step")); assertThat(expression.getExpressionType(), is(CucumberExpression.class)); assertThat(expression.match("Given a step"), is(emptyList())); }
public static <T> AsIterable<T> asIterable() { return new AsIterable<>(); }
@Test @Category(ValidatesRunner.class) public void testIterableSideInputIsImmutable() { final PCollectionView<Iterable<Integer>> view = pipeline.apply("CreateSideInput", Create.of(11)).apply(View.asIterable()); PCollection<Integer> output = pipeline .apply("CreateMainInput", Create.of(29)) .apply( "OutputSideInputs", ParDo.of( new DoFn<Integer, Integer>() { @ProcessElement public void processElement(ProcessContext c) { Iterator<Integer> iterator = c.sideInput(view).iterator(); while (iterator.hasNext()) { try { iterator.remove(); fail("Expected UnsupportedOperationException on remove()"); } catch (UnsupportedOperationException expected) { } c.output(iterator.next()); } } }) .withSideInputs(view)); // Pass at least one value through to guarantee that DoFn executes. PAssert.that(output).containsInAnyOrder(11); pipeline.run(); }
@Override public List<String> choices() { if (commandLine.getArguments() == null) { return Collections.emptyList(); } List<String> argList = Lists.newArrayList(); String argOne = null; if (argList.size() > 1) { argOne = argList.get(1); } VplsCommandEnum vplsCommandEnum = VplsCommandEnum.enumFromString(argOne); if (vplsCommandEnum != null) { switch (vplsCommandEnum) { case CREATE: case LIST: return Collections.emptyList(); default: VplsCommandEnum.toStringList(); } } return VplsCommandEnum.toStringList(); }
@Test public void testCommandCompleter() { VplsCommandCompleter commandCompleter = new VplsCommandCompleter(); List<String> choices = commandCompleter.choices(); List<String> expected = VplsCommandEnum.toStringList(); assertEquals(expected, choices); }
public String format(Date then) { if (then == null) then = now(); Duration d = approximateDuration(then); return format(d); }
@Test public void testMonthsAgo() throws Exception { PrettyTime t = new PrettyTime(now); Assert.assertEquals("3 months ago", t.format(now.minusMonths(3))); }
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) { Objects.requireNonNull(metric); if (batchMeasure == null) { return Optional.empty(); } Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(builder, batchMeasure); case LONG: return toLongMeasure(builder, batchMeasure); case DOUBLE: return toDoubleMeasure(builder, batchMeasure); case BOOLEAN: return toBooleanMeasure(builder, batchMeasure); case STRING: return toStringMeasure(builder, batchMeasure); case LEVEL: return toLevelMeasure(builder, batchMeasure); case NO_VALUE: return toNoValueMeasure(builder); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
@Test public void toMeasure_returns_no_value_if_dto_has_no_value_for_String_Metric() { Optional<Measure> measure = underTest.toMeasure(EMPTY_BATCH_MEASURE, SOME_STRING_METRIC); assertThat(measure).isPresent(); assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.NO_VALUE); }
@Override public synchronized void connect() throws AlluxioStatusException { if (mConnected) { return; } disconnect(); Preconditions.checkState(!mClosed, "Client is closed, will not try to connect."); IOException lastConnectFailure = null; RetryPolicy retryPolicy = mRetryPolicySupplier.get(); while (retryPolicy.attempt()) { if (mClosed) { throw new FailedPreconditionException("Failed to connect: client has been closed"); } // Re-query the address in each loop iteration in case it has changed (e.g. master // failover). try { mServerAddress = queryGrpcServerAddress(); } catch (UnavailableException e) { LOG.debug("Failed to determine {} rpc address ({}): {}", getServiceName(), retryPolicy.getAttemptCount(), e.toString()); continue; } try { beforeConnect(); LOG.debug("Alluxio client (version {}) is trying to connect with {} @ {}", RuntimeConstants.VERSION, getServiceName(), mServerAddress); // set up rpc group channel mChannel = createChannel(); // Create stub for version service on host mVersionService = ServiceVersionClientServiceGrpc.newBlockingStub(mChannel); mConnected = true; afterConnect(); checkVersion(getServiceVersion()); LOG.debug("Alluxio client (version {}) is connected with {} @ {}", RuntimeConstants.VERSION, getServiceName(), mServerAddress); return; } catch (IOException e) { LOG.debug("Failed to connect ({}) with {} @ {}", retryPolicy.getAttemptCount(), getServiceName(), mServerAddress, e); lastConnectFailure = e; if (e instanceof UnauthenticatedException) { // If there has been a failure in opening GrpcChannel, it's possible because // the authentication credential has expired. Re-login. mContext.getUserState().relogin(); } if (e instanceof NotFoundException) { // service is not found in the server, skip retry break; } } } // Reaching here indicates that we did not successfully connect. 
if (mChannel != null) { mChannel.shutdown(); } if (mServerAddress == null) { throw new UnavailableException( String.format("Failed to determine address for %s after %s attempts", getServiceName(), retryPolicy.getAttemptCount())); } /* * Throw as-is if {@link UnauthenticatedException} occurred. */ if (lastConnectFailure instanceof UnauthenticatedException) { throw (AlluxioStatusException) lastConnectFailure; } if (lastConnectFailure instanceof NotFoundException) { throw new NotFoundException(lastConnectFailure.getMessage(), new ServiceNotFoundException(lastConnectFailure.getMessage(), lastConnectFailure)); } throw new UnavailableException( String.format( "Failed to connect to master (%s) after %s attempts." + "Please check if Alluxio master is currently running on \"%s\". Service=\"%s\"", mServerAddress, retryPolicy.getAttemptCount(), mServerAddress, getServiceName()), lastConnectFailure); }
@Test public void connectFailToDetermineMasterAddress() throws Exception { alluxio.Client client = new BaseTestClient() { @Override public synchronized InetSocketAddress getRemoteSockAddress() throws UnavailableException { throw new UnavailableException("Failed to determine master address"); } }; mExpectedException.expect(UnavailableException.class); mExpectedException.expectMessage("Failed to determine address for Test Service Name"); client.connect(); }
@Override public TListMaterializedViewStatusResult listMaterializedViewStatus(TGetTablesParams params) throws TException { LOG.debug("get list table request: {}", params); PatternMatcher matcher = null; boolean caseSensitive = CaseSensibility.TABLE.getCaseSensibility(); if (params.isSetPattern()) { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), caseSensitive); } // database privs should be checked in analysis phrase long limit = params.isSetLimit() ? params.getLimit() : -1; UserIdentity currentUser; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } Preconditions.checkState(params.isSetType() && TTableType.MATERIALIZED_VIEW.equals(params.getType())); return listMaterializedViewStatus(limit, matcher, currentUser, params); }
@Test public void testGetSpecialColumnForSyncMv() throws Exception { starRocksAssert.withDatabase("test_table").useDatabase("test_table") .withTable("CREATE TABLE `base1` (\n" + "event_day DATE,\n" + "department_id int(11) NOT NULL COMMENT \"\"\n" + ") ENGINE=OLAP\n" + "DUPLICATE KEY(event_day, department_id)\n" + "DISTRIBUTED BY HASH(department_id) BUCKETS 1\n" + "PROPERTIES (\n" + "\"replication_num\" = \"1\",\n" + "\"in_memory\" = \"false\",\n" + "\"storage_format\" = \"DEFAULT\",\n" + "\"enable_persistent_index\" = \"false\"\n" + ");") .withMaterializedView("create materialized view test_table.mv$test as select event_day from base1"); ConnectContext ctx = starRocksAssert.getCtx(); String createUserSql = "create user test4"; DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(createUserSql, ctx), ctx); String grantSql = "GRANT SELECT ON TABLE test_table.base1 TO USER `test4`@`%`;"; DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(grantSql, ctx), ctx); FrontendServiceImpl impl = new FrontendServiceImpl(exeEnv); TGetTablesParams request = new TGetTablesParams(); TUserIdentity userIdentity = new TUserIdentity(); userIdentity.setUsername("test4"); userIdentity.setHost("%"); userIdentity.setIs_domain(false); request.setCurrent_user_ident(userIdentity); request.setPattern("mv$test"); request.setDb("test_table"); request.setType(TTableType.MATERIALIZED_VIEW); TListMaterializedViewStatusResult response = impl.listMaterializedViewStatus(request); Assert.assertEquals(1, response.materialized_views.size()); }
public static <T> Map<String, T> translateDeprecatedConfigs(Map<String, T> configs, String[][] aliasGroups) { return translateDeprecatedConfigs(configs, Stream.of(aliasGroups) .collect(Collectors.toMap(x -> x[0], x -> Stream.of(x).skip(1).collect(Collectors.toList())))); }
@Test public void testAllowNullOverride() { Map<String, String> config = new HashMap<>(); config.put("foo.bar.deprecated", "baz"); config.put("foo.bar", null); Map<String, String> newConfig = ConfigUtils.translateDeprecatedConfigs(config, new String[][]{ {"foo.bar", "foo.bar.deprecated"} }); assertNotNull(newConfig); assertNull(newConfig.get("foo.bar")); assertNull(newConfig.get("foo.bar.deprecated")); }
@Override public void setRampDownPercent(long rampDownPercent) { Validate.isTrue((rampDownPercent >= 0) && (rampDownPercent < 100), "rampDownPercent must be a value between 0 and 99"); this.rampDownPercent = rampDownPercent; }
@Test(expected = IllegalArgumentException.class) public void testSetRampDownPercent_lessThan0() { sampler.setRampDownPercent(-1); }
@Override public void writeChar(final int v) throws IOException { ensureAvailable(CHAR_SIZE_IN_BYTES); MEM.putChar(buffer, ARRAY_BYTE_BASE_OFFSET + pos, (char) v); pos += CHAR_SIZE_IN_BYTES; }
@Test public void testWriteCharForPositionV() throws Exception { char expected = 100; out.writeChar(2, expected); char actual = Bits.readChar(out.buffer, 2, ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN); assertEquals(expected, actual); }
@Udf public Long trunc(@UdfParameter final Long val) { return val; }
@Test public void shouldHandleNullValues() { assertThat(udf.trunc((Integer) null), is((Long) null)); assertThat(udf.trunc((Long) null), is((Long) null)); assertThat(udf.trunc((Double) null), is((Long) null)); assertThat(udf.trunc((Double) null), is((Long) null)); assertThat(udf.trunc((BigDecimal) null), is((BigDecimal) null)); assertThat(udf.trunc((Double) null, 2), is((Long) null)); assertThat(udf.trunc((BigDecimal) null, 2), is((BigDecimal) null)); }
@Override public Float convert(String source) { return isNotEmpty(source) ? valueOf(source) : null; }
@Test void testConvert() { assertEquals(Float.valueOf("1.0"), converter.convert("1.0")); assertNull(converter.convert(null)); assertThrows(NumberFormatException.class, () -> { converter.convert("ttt"); }); }
public static Writer createWriter(Configuration conf, Writer.Option... opts ) throws IOException { Writer.CompressionOption compressionOption = Options.getOption(Writer.CompressionOption.class, opts); CompressionType kind; if (compressionOption != null) { kind = compressionOption.getValue(); } else { kind = getDefaultCompressionType(conf); opts = Options.prependOptions(opts, Writer.compression(kind)); } switch (kind) { default: case NONE: return new Writer(conf, opts); case RECORD: return new RecordCompressWriter(conf, opts); case BLOCK: return new BlockCompressWriter(conf, opts); } }
@Test public void testSerializationAvailability() throws IOException { Configuration conf = new Configuration(); Path path = new Path(GenericTestUtils.getTempPath( "serializationAvailability")); // Check if any serializers aren't found. try { SequenceFile.createWriter( conf, SequenceFile.Writer.file(path), SequenceFile.Writer.keyClass(String.class), SequenceFile.Writer.valueClass(NullWritable.class)); // Note: This may also fail someday if JavaSerialization // is activated by default. fail("Must throw IOException for missing serializer for the Key class"); } catch (IOException e) { assertTrue(e.getMessage().startsWith( "Could not find a serializer for the Key class: '" + String.class.getName() + "'.")); } try { SequenceFile.createWriter( conf, SequenceFile.Writer.file(path), SequenceFile.Writer.keyClass(NullWritable.class), SequenceFile.Writer.valueClass(String.class)); // Note: This may also fail someday if JavaSerialization // is activated by default. fail("Must throw IOException for missing serializer for the Value class"); } catch (IOException e) { assertTrue(e.getMessage().startsWith( "Could not find a serializer for the Value class: '" + String.class.getName() + "'.")); } // Write a simple file to test deserialization failures with writeTest(FileSystem.get(conf), 1, 1, path, CompressionType.NONE, null); // Remove Writable serializations, to enforce error. conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, AvroReflectSerialization.class.getName()); // Now check if any deserializers aren't found. try { new SequenceFile.Reader( conf, SequenceFile.Reader.file(path)); fail("Must throw IOException for missing deserializer for the Key class"); } catch (IOException e) { assertTrue(e.getMessage().startsWith( "Could not find a deserializer for the Key class: '" + RandomDatum.class.getName() + "'.")); } }
public KeltnerChannelMiddleIndicator(BarSeries series, int barCountEMA) { this(new TypicalPriceIndicator(series), barCountEMA); }
@Test public void keltnerChannelMiddleIndicatorTest() { KeltnerChannelMiddleIndicator km = new KeltnerChannelMiddleIndicator(new ClosePriceIndicator(data), 14); assertNumEquals(11764.23, km.getValue(13)); assertNumEquals(11793.0687, km.getValue(14)); assertNumEquals(11817.6182, km.getValue(15)); assertNumEquals(11839.9944, km.getValue(16)); assertNumEquals(11859.9725, km.getValue(17)); assertNumEquals(11864.2335, km.getValue(18)); assertNumEquals(11887.6903, km.getValue(19)); assertNumEquals(11908.2609, km.getValue(20)); assertNumEquals(11928.7941, km.getValue(21)); assertNumEquals(11950.5749, km.getValue(22)); assertNumEquals(11978.7156, km.getValue(23)); assertNumEquals(12012.6402, km.getValue(24)); assertNumEquals(12042.9401, km.getValue(25)); assertNumEquals(12067.7868, km.getValue(26)); assertNumEquals(12095.1832, km.getValue(27)); assertNumEquals(12118.2508, km.getValue(28)); assertNumEquals(12132.7027, km.getValue(29)); }
protected void clearGroup(ReceiptHandleGroupKey key) { if (key == null) { return; } ProxyConfig proxyConfig = ConfigurationManager.getProxyConfig(); ReceiptHandleGroup handleGroup = receiptHandleGroupMap.remove(key); if (handleGroup == null) { return; } handleGroup.scan((msgID, handle, v) -> { try { handleGroup.computeIfPresent(msgID, handle, messageReceiptHandle -> { CompletableFuture<AckResult> future = new CompletableFuture<>(); eventListener.fireEvent(new RenewEvent(key, messageReceiptHandle, proxyConfig.getInvisibleTimeMillisWhenClear(), RenewEvent.EventType.CLEAR_GROUP, future)); return CompletableFuture.completedFuture(null); }); } catch (Exception e) { log.error("error when clear handle for group. key:{}", key, e); } }); }
@Test public void testClearGroup() { Channel channel = PROXY_CONTEXT.getVal(ContextVariable.CHANNEL); receiptHandleManager.addReceiptHandle(PROXY_CONTEXT, channel, GROUP, MSG_ID, messageReceiptHandle); receiptHandleManager.clearGroup(new ReceiptHandleGroupKey(channel, GROUP)); SubscriptionGroupConfig groupConfig = new SubscriptionGroupConfig(); Mockito.when(metadataService.getSubscriptionGroupConfig(Mockito.any(), Mockito.eq(GROUP))).thenReturn(groupConfig); receiptHandleManager.scheduleRenewTask(); Mockito.verify(messagingProcessor, Mockito.timeout(1000).times(1)) .changeInvisibleTime(Mockito.any(ProxyContext.class), Mockito.any(ReceiptHandle.class), Mockito.eq(MESSAGE_ID), Mockito.eq(GROUP), Mockito.eq(TOPIC), Mockito.eq(ConfigurationManager.getProxyConfig().getInvisibleTimeMillisWhenClear())); }
@Override public void deleteDiyPage(Long id) { // 校验存在 validateDiyPageExists(id); // 删除 diyPageMapper.deleteById(id); }
@Test public void testDeleteDiyPage_success() { // mock 数据 DiyPageDO dbDiyPage = randomPojo(DiyPageDO.class); diyPageMapper.insert(dbDiyPage);// @Sql: 先插入出一条存在的数据 // 准备参数 Long id = dbDiyPage.getId(); // 调用 diyPageService.deleteDiyPage(id); // 校验数据不存在了 assertNull(diyPageMapper.selectById(id)); }
@Override public List<Type> getColumnTypes() { return columnTypes; }
@Test public void testGetColumnTypes() { RecordSet recordSet = new ExampleRecordSet(new ExampleSplit("test", "schema", "table", dataUri), ImmutableList.of( new ExampleColumnHandle("test", "text", createUnboundedVarcharType(), 0), new ExampleColumnHandle("test", "value", BIGINT, 1))); assertEquals(recordSet.getColumnTypes(), ImmutableList.of(createUnboundedVarcharType(), BIGINT)); recordSet = new ExampleRecordSet(new ExampleSplit("test", "schema", "table", dataUri), ImmutableList.of( new ExampleColumnHandle("test", "value", BIGINT, 1), new ExampleColumnHandle("test", "text", createUnboundedVarcharType(), 0))); assertEquals(recordSet.getColumnTypes(), ImmutableList.of(BIGINT, createUnboundedVarcharType())); recordSet = new ExampleRecordSet(new ExampleSplit("test", "schema", "table", dataUri), ImmutableList.of( new ExampleColumnHandle("test", "value", BIGINT, 1), new ExampleColumnHandle("test", "value", BIGINT, 1), new ExampleColumnHandle("test", "text", createUnboundedVarcharType(), 0))); assertEquals(recordSet.getColumnTypes(), ImmutableList.of(BIGINT, BIGINT, createUnboundedVarcharType())); recordSet = new ExampleRecordSet(new ExampleSplit("test", "schema", "table", dataUri), ImmutableList.of()); assertEquals(recordSet.getColumnTypes(), ImmutableList.of()); }
@Override public GetDataStream getDataStream() { return windmillStreamFactory.createGetDataStream( dispatcherClient.getWindmillServiceStub(), throttleTimers.getDataThrottleTimer()); }
// Verifies that heartbeats sent over the GetData stream as HeartbeatRequest
// protos are batched: all keys for the same computation go into a single
// ComputationHeartbeatRequest, chunks stay within STREAM_CHUNK_SIZE, and the
// stream header carries the expected job identity.
@Test
public void testStreamingGetDataHeartbeatsAsHeartbeatRequests() throws Exception {
    // This server records the heartbeats observed but doesn't respond.
    final List<ComputationHeartbeatRequest> receivedHeartbeats = new ArrayList<>();
    serviceRegistry.addService(
        new CloudWindmillServiceV1Alpha1ImplBase() {
          @Override
          public StreamObserver<StreamingGetDataRequest> getDataStream(
              StreamObserver<StreamingGetDataResponse> responseObserver) {
            return new StreamObserver<StreamingGetDataRequest>() {
              boolean sawHeader = false;

              @Override
              public void onNext(StreamingGetDataRequest chunk) {
                try {
                  if (!sawHeader) {
                    // The first message on the stream must be the job header.
                    LOG.info("Received header");
                    errorCollector.checkThat(
                        chunk.getHeader(),
                        Matchers.equalTo(
                            JobHeader.newBuilder()
                                .setJobId("job")
                                .setProjectId("project")
                                .setWorkerId("worker")
                                .setClientId(clientId)
                                .build()));
                    sawHeader = true;
                  } else {
                    LOG.info(
                        "Received {} computationHeartbeatRequests",
                        chunk.getComputationHeartbeatRequestCount());
                    // Each chunk must respect the configured size limit and
                    // carry no regular (request-id) GetData requests.
                    errorCollector.checkThat(
                        chunk.getSerializedSize(), Matchers.lessThanOrEqualTo(STREAM_CHUNK_SIZE));
                    errorCollector.checkThat(chunk.getRequestIdCount(), Matchers.is(0));
                    synchronized (receivedHeartbeats) {
                      receivedHeartbeats.addAll(chunk.getComputationHeartbeatRequestList());
                    }
                  }
                } catch (Exception e) {
                  errorCollector.addError(e);
                }
              }

              @Override
              public void onError(Throwable throwable) {}

              @Override
              public void onCompleted() {
                responseObserver.onCompleted();
              }
            };
          }
        });
    List<String> computation1Keys = new ArrayList<>();
    List<String> computation2Keys = new ArrayList<>();
    // When sending heartbeats as HeartbeatRequest protos, all keys for the same computation should
    // be batched into the same ComputationHeartbeatRequest. Compare to the KeyedGetDataRequest
    // version in the test above, which only sends one key per ComputationGetDataRequest.
    List<ComputationHeartbeatRequest> expectedHeartbeats = new ArrayList<>();
    ComputationHeartbeatRequest.Builder comp1Builder =
        ComputationHeartbeatRequest.newBuilder().setComputationId("Computation1");
    ComputationHeartbeatRequest.Builder comp2Builder =
        ComputationHeartbeatRequest.newBuilder().setComputationId("Computation2");
    for (int i = 0; i < 100; ++i) {
      String computation1Key = "Computation1Key" + i;
      computation1Keys.add(computation1Key);
      comp1Builder.addHeartbeatRequests(
          makeHeartbeatRequest(Collections.singletonList(computation1Key)).get(0));
      // Computation2 uses large keys so the requests span multiple chunks.
      String computation2Key = "Computation2Key" + largeString(i * 20);
      computation2Keys.add(computation2Key);
      comp2Builder.addHeartbeatRequests(
          makeHeartbeatRequest(Collections.singletonList(computation2Key)).get(0));
    }
    expectedHeartbeats.add(comp1Builder.build());
    expectedHeartbeats.add(comp2Builder.build());
    Map<String, Collection<HeartbeatRequest>> heartbeatRequestMap = new HashMap<>();
    heartbeatRequestMap.put("Computation1", makeHeartbeatRequest(computation1Keys));
    heartbeatRequestMap.put("Computation2", makeHeartbeatRequest(computation2Keys));
    GetDataStream stream = client.getDataStream();
    stream.refreshActiveWork(heartbeatRequestMap);
    stream.halfClose();
    assertTrue(stream.awaitTermination(60, TimeUnit.SECONDS));

    // Poll until the server has observed every expected heartbeat.
    boolean receivedAllHeartbeatRequests = false;
    while (!receivedAllHeartbeatRequests) {
      Thread.sleep(100);
      synchronized (receivedHeartbeats) {
        if (receivedHeartbeats.size() != expectedHeartbeats.size()) {
          continue;
        }
        assertEquals(expectedHeartbeats, receivedHeartbeats);
        receivedAllHeartbeatRequests = true;
      }
    }
}
/** Returns the database associated with this user property. */
public String getDatabase() {
    return database;
}
// A freshly constructed UserProperty reports an empty string (not null)
// as its session database.
@Test
public void testGetDefaultSessionDatabase() {
    UserProperty userProperty = new UserProperty();
    String defaultSessionDatabase = userProperty.getDatabase();
    Assert.assertEquals("", defaultSessionDatabase);
}
/**
 * Maintains spare host capacity: reports overcommitted hosts, retires them,
 * and — when the worst-case host loss would cause allocation failure with no
 * spare capacity left — attempts a mitigating node move.
 *
 * @return 1.0 on success (or when maintenance does not apply), 0.0 when a
 *         zero-spare-capacity situation could not be mitigated
 */
@Override
protected double maintain() {
    if ( ! nodeRepository().nodes().isWorking()) return 0.0;

    // Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand.
    if (nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0;

    NodeList allNodes = nodeRepository().nodes().list();
    CapacityChecker capacityChecker = new CapacityChecker(allNodes);
    List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
    metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null);
    retireOvercommitedHosts(allNodes, overcommittedHosts);

    boolean success = true;
    Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
    if (failurePath.isPresent()) {
        // Number of hosts we could lose before the worst case becomes a failure.
        int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
        if (spareHostCapacity == 0) {
            List<Move> mitigation = findMitigation(failurePath.get());
            if (execute(mitigation, failurePath.get())) {
                // We succeeded or are in the process of taking a step to mitigate.
                // Report with the assumption this will eventually succeed to avoid alerting before we're stuck
                spareHostCapacity++;
            } else {
                success = false;
            }
        }
        metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null);
    }
    return success ? 1.0 : 0.0;
}
// With three hosts and the load of only one, the maintainer should find two
// spare hosts' worth of capacity and therefore take no action.
@Test
public void testTwoSpares() {
    var tester = new SpareCapacityMaintainerTester();
    tester.addHosts(3, new NodeResources(10, 100, 1000, 1));
    tester.addNodes(0, 1, new NodeResources(10, 100, 1000, 1), 0);
    tester.maintainer.maintain();
    // No moves or retirements needed; the metric reflects the two spare hosts.
    assertEquals(0, tester.deployer.activations);
    assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
    assertEquals(2, tester.metric.values.get("spareHostCapacity"));
}
/**
 * Appends the datum wrapped by {@code record} to the underlying Avro
 * container file; the {@code ignore} value is discarded.
 */
@Override
public void write(AvroKey<T> record, NullWritable ignore) throws IOException {
    mAvroFileWriter.append(record.datum());
}
// Writes two int records through AvroKeyRecordWriter and reads the resulting
// Avro container back to verify round-trip content and record order.
@Test
void write() throws IOException {
    Schema writerSchema = Schema.create(Schema.Type.INT);
    GenericData dataModel = new ReflectData();
    CodecFactory compressionCodec = CodecFactory.nullCodec();
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    TaskAttemptContext context = mock(TaskAttemptContext.class);

    // Write an avro container file with two records: 1 and 2.
    AvroKeyRecordWriter<Integer> recordWriter =
        new AvroKeyRecordWriter<>(writerSchema, dataModel, compressionCodec, outputStream);
    recordWriter.write(new AvroKey<>(1), NullWritable.get());
    recordWriter.write(new AvroKey<>(2), NullWritable.get());
    recordWriter.close(context);

    // Verify that the file was written as expected.
    InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
    Schema readerSchema = Schema.create(Schema.Type.INT);
    DatumReader<Integer> datumReader = new SpecificDatumReader<>(readerSchema);
    DataFileStream<Integer> dataFileReader = new DataFileStream<>(inputStream, datumReader);
    assertTrue(dataFileReader.hasNext()); // Record 1.
    assertEquals(1, dataFileReader.next().intValue());
    assertTrue(dataFileReader.hasNext()); // Record 2.
    assertEquals(2, dataFileReader.next().intValue());
    assertFalse(dataFileReader.hasNext()); // No more records.
    dataFileReader.close();
    // The writer should never need the task configuration.
    verify(context, never()).getConfiguration();
}
/**
 * Updates an existing pod in the pod store and logs the update.
 *
 * @param pod pod to update; must be non-null and carry a non-empty metadata UID
 */
@Override
public void updatePod(Pod pod) {
    checkNotNull(pod, ERR_NULL_POD);
    checkArgument(!Strings.isNullOrEmpty(pod.getMetadata().getUid()), ERR_NULL_POD_UID);

    k8sPodStore.updatePod(pod);
    log.info(String.format(MSG_POD, pod.getMetadata().getName(), MSG_UPDATED));
}
// Updating a pod that was never registered must be rejected with
// IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testUpdateUnregisteredPod() {
    target.updatePod(POD);
}
@Override public SmsTemplateRespDTO getSmsTemplate(String apiTemplateId) throws Throwable { // 1. 构建请求 // 参考链接 https://cloud.tencent.com/document/product/382/52067 TreeMap<String, Object> body = new TreeMap<>(); body.put("International", INTERNATIONAL_CHINA); body.put("TemplateIdSet", new Integer[]{Integer.valueOf(apiTemplateId)}); JSONObject response = request("DescribeSmsTemplateList", body); // TODO @scholar:会有请求失败的情况么?类似发送的(那块逻辑我补充了) JSONObject TemplateStatusSet = response.getJSONObject("Response").getJSONArray("DescribeTemplateStatusSet").getJSONObject(0); String content = TemplateStatusSet.get("TemplateContent").toString(); int templateStatus = Integer.parseInt(TemplateStatusSet.get("StatusCode").toString()); String auditReason = TemplateStatusSet.get("ReviewReply").toString(); return new SmsTemplateRespDTO().setId(apiTemplateId).setContent(content) .setAuditStatus(convertSmsTemplateAuditStatus(templateStatus)).setAuditReason(auditReason); }
// Mocks the Tencent Cloud HTTP call and verifies that the template id,
// content, audit status and audit reason are extracted correctly.
@Test
public void testGetSmsTemplate() throws Throwable {
    try (MockedStatic<HttpUtils> httpUtilsMockedStatic = mockStatic(HttpUtils.class)) {
        // prepare parameters
        String apiTemplateId = "1122";
        // mock the HTTP call
        httpUtilsMockedStatic.when(() -> HttpUtils.post(anyString(), anyMap(), anyString()))
                .thenReturn("{ \"Response\": {\n" +
                        "        \"DescribeTemplateStatusSet\": [\n" +
                        "            {\n" +
                        "                \"TemplateName\": \"验证码\",\n" +
                        "                \"TemplateId\": 1122,\n" +
                        "                \"International\": 0,\n" +
                        "                \"ReviewReply\": \"审批备注\",\n" +
                        "                \"CreateTime\": 1617379200,\n" +
                        "                \"TemplateContent\": \"您的验证码是{1}\",\n" +
                        "                \"StatusCode\": 0\n" +
                        "            },\n" +
                        "           \n" +
                        "        ],\n" +
                        "        \"RequestId\": \"f36e4f00-605e-49b1-ad0d-bfaba81c7325\"\n" +
                        "    }}");
        // invoke
        SmsTemplateRespDTO result = smsClient.getSmsTemplate(apiTemplateId);
        // assert
        assertEquals("1122", result.getId());
        assertEquals("您的验证码是{1}", result.getContent());
        assertEquals(SmsTemplateAuditStatusEnum.SUCCESS.getStatus(), result.getAuditStatus());
        assertEquals("审批备注", result.getAuditReason());
    }
}
/**
 * Performs a mocked invocation according to the method's "mock" URL parameter.
 * Supported forms after normalization: a "return ..." value (parsed and
 * returned), a "throw[ ExceptionClass]" directive (throws the given or a
 * default degradation exception), or a mock implementation class to delegate to.
 *
 * @throws RpcException if no mock is configured, the mock value cannot be
 *         parsed, a throw-mock is hit, or the mock implementation fails
 */
@Override
public Result invoke(Invocation invocation) throws RpcException {
    if (invocation instanceof RpcInvocation) {
        ((RpcInvocation) invocation).setInvoker(this);
    }
    String mock = getUrl().getMethodParameter(invocation.getMethodName(), MOCK_KEY);

    if (StringUtils.isBlank(mock)) {
        throw new RpcException(new IllegalAccessException("mock can not be null. url :" + url));
    }
    mock = normalizeMock(URL.decode(mock));
    if (mock.startsWith(RETURN_PREFIX)) {
        // "return <value>" — parse the literal against the method's return types.
        mock = mock.substring(RETURN_PREFIX.length()).trim();
        try {
            Type[] returnTypes = RpcUtils.getReturnTypes(invocation);
            Object value = parseMockValue(mock, returnTypes);
            return AsyncRpcResult.newDefaultAsyncResult(value, invocation);
        } catch (Exception ew) {
            throw new RpcException(
                    "mock return invoke error. method :" + invocation.getMethodName() + ", mock:" + mock
                            + ", url: " + url,
                    ew);
        }
    } else if (mock.startsWith(THROW_PREFIX)) {
        // "throw" alone degrades the service; "throw X" throws the named exception.
        mock = mock.substring(THROW_PREFIX.length()).trim();
        if (StringUtils.isBlank(mock)) {
            throw new RpcException("mocked exception for service degradation.");
        } else { // user customized class
            Throwable t = getThrowable(mock);
            throw new RpcException(RpcException.BIZ_EXCEPTION, t);
        }
    } else { // impl mock
        try {
            Invoker<T> invoker = getInvoker(mock);
            return invoker.invoke(invocation);
        } catch (Throwable t) {
            throw new RpcException("Failed to create mock implementation class " + mock, t);
        }
    }
}
// A bare "throw" mock directive (no exception class) must surface as an
// RpcException signalling service degradation.
@Test
void testInvokeThrowsRpcException3() {
    URL url = URL.valueOf("remote://1.2.3.4/" + String.class.getName());
    url = url.addParameter(MOCK_KEY, "throw");
    MockInvoker mockInvoker = new MockInvoker(url, String.class);

    RpcInvocation invocation = new RpcInvocation();
    invocation.setMethodName("getSomething");
    Assertions.assertThrows(RpcException.class, () -> mockInvoker.invoke(invocation));
}
public double getNormalizedEditDistance(String source, String target) { ImmutableList<String> sourceTerms = NamingConventions.splitToLowercaseTerms(source); ImmutableList<String> targetTerms = NamingConventions.splitToLowercaseTerms(target); // costMatrix[s][t] is the edit distance between source term s and target term t double[][] costMatrix = sourceTerms.stream() .map(s -> targetTerms.stream().mapToDouble(t -> editDistanceFn.apply(s, t)).toArray()) .toArray(double[][]::new); // worstCaseMatrix[s][t] is the worst case distance between source term s and target term t double[][] worstCaseMatrix = sourceTerms.stream() .map(s -> s.length()) .map( s -> targetTerms.stream() .map(t -> t.length()) .mapToDouble(t -> maxDistanceFn.apply(s, t)) .toArray()) .toArray(double[][]::new); double[] sourceTermDeletionCosts = sourceTerms.stream().mapToDouble(s -> maxDistanceFn.apply(s.length(), 0)).toArray(); double[] targetTermAdditionCosts = targetTerms.stream().mapToDouble(s -> maxDistanceFn.apply(0, s.length())).toArray(); // this is an array of assignments of source terms to target terms. If assignments[i] contains // the value j this means that source term i has been assigned to target term j // There will be one entry in cost for each source term: // - If there are more source terms than target terms then some will be unassigned - value -1 // - If there are a fewer source terms than target terms then some target terms will not be // referenced in the array int[] assignments = new HungarianAlgorithm(costMatrix).execute(); double assignmentCost = computeCost(assignments, costMatrix, sourceTermDeletionCosts, targetTermAdditionCosts); double maxCost = computeCost(assignments, worstCaseMatrix, sourceTermDeletionCosts, targetTermAdditionCosts); return assignmentCost / maxCost; }
// Identifiers composed of the same terms in a different order must be
// considered an exact match (distance 0), since terms are assigned, not
// compared positionally.
@Test
public void getNormalizedEditDistance_returnsMatch_withPermutedTerms() {
    TermEditDistance termEditDistance = new TermEditDistance();
    String sourceIdentifier = "fooBarBaz";
    String targetIdentifier = "bazFooBar";
    double distance = termEditDistance.getNormalizedEditDistance(sourceIdentifier, targetIdentifier);
    assertThat(distance).isEqualTo(0.0);
}
/**
 * Records the desired target state for a connector and notifies the update
 * listener when the state actually changed.
 *
 * @param connector name of a previously configured connector
 * @param state     new target state
 * @throws IllegalArgumentException if the connector is not configured
 */
@Override
public synchronized void putTargetState(String connector, TargetState state) {
    ConnectorState existing = connectors.get(connector);
    if (existing == null) {
        throw new IllegalArgumentException("No connector `" + connector + "` configured");
    }

    TargetState previous = existing.targetState;
    existing.targetState = state;

    // Only fire the callback when the state really changed, and only if a
    // listener is registered.
    if (updateListener != null && !state.equals(previous)) {
        updateListener.onConnectorTargetStateChange(connector);
    }
}
// Verifies target-state writes: rejected for unknown connectors, persisted in
// the snapshot, and the listener fires only once for a repeated identical state.
@Test
public void testPutTargetState() {
    // Can't write target state for non-existent connector
    assertThrows(IllegalArgumentException.class,
        () -> configStore.putTargetState(CONNECTOR_IDS.get(0), TargetState.PAUSED));

    configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null);
    configStore.putTargetState(CONNECTOR_IDS.get(0), TargetState.PAUSED);
    // Ensure that ConfigBackingStore.UpdateListener::onConnectorTargetStateChange is called only once if the same state is written twice
    configStore.putTargetState(CONNECTOR_IDS.get(0), TargetState.PAUSED);

    ClusterConfigState configState = configStore.snapshot();
    assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0)));

    verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0)));
    verify(configUpdateListener).onConnectorTargetStateChange(eq(CONNECTOR_IDS.get(0)));
}
/**
 * Builds a KafkaCluster model and validates broker scale-down / role changes.
 * If a check fails and {@code tryToFixProblems} is true, the offending scale-down
 * and role changes are reverted and the preparation is retried once (with
 * fixing disabled); warnings from reverted changes are added to the status.
 * If a check fails and fixing is disabled, the future fails with an
 * InvalidResourceException listing the problems.
 *
 * @param kafkaCr          Kafka custom resource
 * @param nodePools        node pool custom resources
 * @param oldStorage       previous storage configuration per pool
 * @param currentPods      currently existing pods per pool
 * @param versionChange    Kafka version change descriptor
 * @param kafkaStatus      status object to collect warning conditions
 * @param tryToFixProblems whether to attempt automatic reverts on failure
 * @return future completing with the validated KafkaCluster model
 */
public Future<KafkaCluster> prepareKafkaCluster(
        Kafka kafkaCr,
        List<KafkaNodePool> nodePools,
        Map<String, Storage> oldStorage,
        Map<String, List<String>> currentPods,
        KafkaVersionChange versionChange,
        KafkaStatus kafkaStatus,
        boolean tryToFixProblems) {
    return createKafkaCluster(kafkaCr, nodePools, oldStorage, currentPods, versionChange)
            .compose(kafka -> brokerRemovalCheck(kafkaCr, kafka))
            .compose(kafka -> {
                if (checkFailed() && tryToFixProblems) {
                    // We have a failure, and should try to fix issues
                    // Once we fix it, we call this method again, but this time with tryToFixProblems set to false
                    return revertScaleDown(kafka, kafkaCr, nodePools)
                            .compose(kafkaAndNodePools -> revertRoleChange(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs()))
                            .compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, currentPods, versionChange, kafkaStatus, false));
                } else if (checkFailed()) {
                    // We have a failure, but we should not try to fix it
                    List<String> errors = new ArrayList<>();

                    if (scaleDownCheckFailed) {
                        errors.add("Cannot scale-down Kafka brokers " + kafka.removedNodes() + " because they have assigned partition-replicas.");
                    }

                    if (usedToBeBrokersCheckFailed) {
                        errors.add("Cannot remove the broker role from nodes " + kafka.usedToBeBrokerNodes() + " because they have assigned partition-replicas.");
                    }

                    return Future.failedFuture(new InvalidResourceException("Following errors were found when processing the Kafka custom resource: " + errors));
                } else {
                    // If everything succeeded, we return the KafkaCluster object
                    // If any warning conditions exist from the reverted changes, we add them to the status
                    if (!warningConditions.isEmpty()) {
                        kafkaStatus.addConditions(warningConditions);
                    }
                    return Future.succeededFuture(kafka);
                }
            });
}
// An existing KRaft cluster with mixed (broker+controller) nodes and no
// scale-down should be prepared without any broker-removal checks or warnings.
@Test
public void testExistingClusterWithMixedNodesKRaft(VertxTestContext context) {
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    KafkaStatus kafkaStatus = new KafkaStatus();
    KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier);

    Checkpoint async = context.checkpoint();
    creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true)
            .onComplete(context.succeeding(kc -> context.verify(() -> {
                // Kafka cluster is created
                assertThat(kc, is(notNullValue()));
                assertThat(kc.nodes().size(), is(3));
                assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(3000, 3001, 3002)));
                assertThat(kc.removedNodes(), is(Set.of()));

                // Check the status conditions
                assertThat(kafkaStatus.getConditions(), is(nullValue()));

                // No scale-down => scale-down check is not done
                verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any());

                async.flag();
            })));
}
/**
 * Builds a mutable {@link HashMap} from the given entries.
 * Later entries with the same key overwrite earlier ones.
 *
 * @param entries entries to copy into the map
 * @param <K>     key type
 * @param <V>     value type
 * @return a new mutable map containing all entries
 */
@SafeVarargs
public static <K, V> Map<K, V> ofEntries(Map.Entry<K, V>... entries) {
    // Pre-size so the map never rehashes while filling (default load factor 0.75).
    final Map<K, V> map = new HashMap<>(Math.max((int) (entries.length / 0.75f) + 1, 16));
    for (Map.Entry<K, V> pair : entries) {
        map.put(pair.getKey(), pair.getValue());
    }
    return map;
}
// MapUtil.ofEntries builds a map containing exactly the supplied entries.
@Test
public void ofEntriesTest(){
    final Map<String, Integer> map = MapUtil.ofEntries(MapUtil.entry("a", 1), MapUtil.entry("b", 2));

    assertEquals(2, map.size());
    assertEquals(Integer.valueOf(1), map.get("a"));
    assertEquals(Integer.valueOf(2), map.get("b"));
}
/** Delegates the inequality assertion to the superclass implementation. */
@Override
public void isNotEqualTo(@Nullable Object expected) {
    super.isNotEqualTo(expected);
}
// Arrays of different lengths (shared prefix) must compare as not equal.
@Test
public void isNotEqualTo_WithoutToleranceParameter_Success_Longer() {
    assertThat(array(2.2d, 3.3d)).isNotEqualTo(array(2.2d, 3.3d, 4.4d));
}
/**
 * Convenience overload: formats the quantity in RuneScape decimal-stack
 * notation, delegating to {@code quantityToRSDecimalStack(int, boolean)}
 * with the boolean flag set to {@code false}.
 */
public static String quantityToRSDecimalStack(int quantity) {
    return quantityToRSDecimalStack(quantity, false);
}
// Exercises the K/M/B suffix thresholds and one-decimal rounding of the
// RuneScape decimal-stack formatter, including 0 and Integer.MAX_VALUE.
@Test
public void quantityToRSDecimalStackSize() {
    assertEquals("0", QuantityFormatter.quantityToRSDecimalStack(0));
    assertEquals("8500", QuantityFormatter.quantityToRSDecimalStack(8_500));
    assertEquals("10K", QuantityFormatter.quantityToRSDecimalStack(10_000));
    assertEquals("21.7K", QuantityFormatter.quantityToRSDecimalStack(21_700));
    assertEquals("100K", QuantityFormatter.quantityToRSDecimalStack(100_000));
    assertEquals("100.3K", QuantityFormatter.quantityToRSDecimalStack(100_300));
    assertEquals("1M", QuantityFormatter.quantityToRSDecimalStack(1_000_000));
    assertEquals("8.4M", QuantityFormatter.quantityToRSDecimalStack(8_450_000));
    assertEquals("10M", QuantityFormatter.quantityToRSDecimalStack(10_000_000));
    assertEquals("12.8M", QuantityFormatter.quantityToRSDecimalStack(12_800_000));
    assertEquals("100M", QuantityFormatter.quantityToRSDecimalStack(100_000_000));
    assertEquals("250.1M", QuantityFormatter.quantityToRSDecimalStack(250_100_000));
    assertEquals("1B", QuantityFormatter.quantityToRSDecimalStack(1_000_000_000));
    assertEquals("1.5B", QuantityFormatter.quantityToRSDecimalStack(1500_000_000));
    assertEquals("2.1B", QuantityFormatter.quantityToRSDecimalStack(Integer.MAX_VALUE));
}
/**
 * Looks up a single message by commit-log offset and streams it back to the
 * client as a zero-copy {@link FileRegion}. On success the response is written
 * directly to the channel and {@code null} is returned (nothing more to send);
 * on lookup failure a SYSTEM_ERROR response is returned.
 *
 * @throws RemotingCommandException if the request header cannot be decoded
 */
public RemotingCommand viewMessageById(ChannelHandlerContext ctx, RemotingCommand request)
    throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    final ViewMessageRequestHeader requestHeader =
        (ViewMessageRequestHeader) request.decodeCommandCustomHeader(ViewMessageRequestHeader.class);

    response.setOpaque(request.getOpaque());

    final SelectMappedBufferResult selectMappedBufferResult =
        this.brokerController.getMessageStore().selectOneMessageByOffset(requestHeader.getOffset());
    if (selectMappedBufferResult != null) {
        response.setCode(ResponseCode.SUCCESS);
        response.setRemark(null);

        try {
            FileRegion fileRegion =
                new OneMessageTransfer(response.encodeHeader(selectMappedBufferResult.getSize()),
                    selectMappedBufferResult);
            ctx.channel()
                .writeAndFlush(fileRegion)
                .addListener((ChannelFutureListener) future -> {
                    // Release the mapped buffer once the transfer completes
                    // (success or failure) and record RPC metrics.
                    selectMappedBufferResult.release();
                    Attributes attributes = RemotingMetricsManager.newAttributesBuilder()
                        .put(LABEL_REQUEST_CODE, RemotingHelper.getRequestCodeDesc(request.getCode()))
                        .put(LABEL_RESPONSE_CODE, RemotingHelper.getResponseCodeDesc(response.getCode()))
                        .put(LABEL_RESULT, RemotingMetricsManager.getWriteAndFlushResult(future))
                        .build();
                    RemotingMetricsManager.rpcLatency.record(request.getProcessTimer().elapsed(TimeUnit.MILLISECONDS), attributes);
                    if (!future.isSuccess()) {
                        LOGGER.error("Transfer one message from page cache failed, ", future.cause());
                    }
                });
        } catch (Throwable e) {
            LOGGER.error("", e);
            // Write never started: release the buffer here instead.
            selectMappedBufferResult.release();
        }

        // Response already written via the FileRegion; nothing for the caller to send.
        return null;
    } else {
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark("can not find message by the offset, " + requestHeader.getOffset());
    }

    return response;
}
// Lookup miss returns SYSTEM_ERROR; lookup hit streams the message over the
// channel and returns null (no further response to send).
@Test
public void testViewMessageById() throws RemotingCommandException {
    ViewMessageRequestHeader viewMessageRequestHeader = new ViewMessageRequestHeader();
    viewMessageRequestHeader.setTopic("topic");
    viewMessageRequestHeader.setOffset(0L);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.VIEW_MESSAGE_BY_ID, viewMessageRequestHeader);
    request.makeCustomHeaderToNet();
    request.setCode(RequestCode.VIEW_MESSAGE_BY_ID);

    when(messageStore.selectOneMessageByOffset(anyLong())).thenReturn(null);
    RemotingCommand response = queryMessageProcessor.processRequest(handlerContext, request);
    Assert.assertEquals(response.getCode(), ResponseCode.SYSTEM_ERROR);

    when(messageStore.selectOneMessageByOffset(anyLong())).thenReturn(new SelectMappedBufferResult(0, null, 0, null));
    response = queryMessageProcessor.processRequest(handlerContext, request);
    Assert.assertNull(response);
}
/**
 * Writes the website (static hosting) configuration for the bucket containing
 * {@code file} by patching the bucket's website and logging settings. When the
 * distribution is disabled the website configuration is removed.
 *
 * @throws BackgroundException on any service error while patching the bucket
 */
@Override
public void write(final Path file, final Distribution distribution, final LoginCallback prompt) throws BackgroundException {
    final Path container = containerService.getContainer(file);
    try {
        // Default index document unless one is explicitly configured.
        String suffix = "index.html";
        if(StringUtils.isNotBlank(distribution.getIndexDocument())) {
            suffix = PathNormalizer.name(distribution.getIndexDocument());
        }
        // Enable website endpoint
        final Storage.Buckets.Patch request = session.getClient().buckets().patch(container.getName(), new Bucket()
            .setLogging(new Bucket.Logging()
                .setLogObjectPrefix(distribution.isEnabled() ? new HostPreferences(session.getHost()).getProperty("google.logging.prefix") : null)
                .setLogBucket(StringUtils.isNotBlank(distribution.getLoggingContainer()) ? distribution.getLoggingContainer() : container.getName()))
            .setWebsite(
                distribution.isEnabled() ? new Bucket.Website().setMainPageSuffix(suffix) : null
            ));
        // Requester-pays buckets need the billing project set on the request.
        if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
            request.setUserProject(session.getHost().getCredentials().getUsername());
        }
        request.execute();
    }
    catch(IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Cannot write website configuration", e);
    }
}
// Round-trip: enable website distribution on a fresh bucket, read it back and
// verify it is enabled, then clean up the bucket.
@Test
public void testWrite() throws Exception {
    final DistributionConfiguration configuration = new GoogleStorageWebsiteDistributionConfiguration(session);
    final Path bucket = new Path(new AsciiRandomStringService().random().toLowerCase(Locale.ROOT), EnumSet.of(Path.Type.directory, Path.Type.volume));
    new GoogleStorageDirectoryFeature(session).mkdir(bucket, new TransferStatus());
    configuration.write(bucket, new Distribution(Distribution.WEBSITE, null, true), new DisabledLoginCallback());
    final Distribution distribution = configuration.read(bucket, Distribution.WEBSITE, new DisabledLoginCallback());
    assertTrue(distribution.isEnabled());
    assertEquals(configuration.getName(), distribution.getName());
    new GoogleStorageDeleteFeature(session).delete(Collections.<Path>singletonList(bucket), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Creates an index on the underlying collection, converting the legacy
 * {@code DBObject} keys and options into the modern driver's types.
 */
public void createIndex(DBObject keys, DBObject options) {
    delegate.createIndex(new BasicDBObject(keys.toMap()), toIndexOptions(options));
}
// Creating single-field and compound indexes produces the expected index
// documents (alongside the implicit _id_ index).
@Test
void createIndex() {
    final var collection = jacksonCollection("simple", Simple.class);

    collection.createIndex(new BasicDBObject("name", 1));
    collection.createIndex(new BasicDBObject("_id", 1).append("name", 1));

    assertThat(mongoCollection("simple").listIndexes()).containsExactlyInAnyOrder(
            new Document("key", new Document("_id", 1))
                    .append("name", "_id_")
                    .append("v", 2),
            new Document("key", new Document("name", 1))
                    .append("name", "name_1")
                    .append("v", 2),
            new Document("key", new Document("_id", 1)
                    .append("name", 1))
                    .append("name", "_id_1_name_1")
                    .append("v", 2)
    );
}
/**
 * Aggregates the grouped stream per session window, delegating to the
 * three-argument overload with {@code Materialized.with(null, null)} (no
 * explicit serdes or store name).
 */
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer,
                                        final Merger<? super K, V> sessionMerger) {
    return aggregate(initializer, sessionMerger, Materialized.with(null, null));
}
// Cogrouped session-window aggregation: each key's records within the session
// gap are folded with the string adder; timestamps follow the input records.
@Test
public void sessionWindowAggregateTest() {
    final KTable<Windowed<String>, String> customers = groupedStream.cogroup(MockAggregator.TOSTRING_ADDER)
        .windowedBy(SessionWindows.with(ofMillis(500)))
        .aggregate(MockInitializer.STRING_INIT, sessionMerger, Materialized.with(Serdes.String(), Serdes.String()));
    customers.toStream().to(OUTPUT);

    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> testInputTopic = driver.createInputTopic(
            TOPIC, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<Windowed<String>, String> testOutputTopic = driver.createOutputTopic(
            OUTPUT, new SessionWindowedDeserializer<>(new StringDeserializer()), new StringDeserializer());
        testInputTopic.pipeInput("k1", "A", 0);
        testInputTopic.pipeInput("k2", "A", 0);
        // Timestamps 599/607 exceed the 500ms inactivity gap, starting new sessions.
        testInputTopic.pipeInput("k1", "B", 599);
        testInputTopic.pipeInput("k2", "B", 607);

        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A", 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+A", 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+B", 599);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+B", 607);
    }
}
/**
 * Builds the redirect to the CAS login page for the current request,
 * propagating renew (forced authentication) and gateway (passive) flags
 * from either the configuration or request attributes.
 *
 * @param ctx call context carrying the web context
 * @return the redirection action to the computed CAS login URL
 */
@Override
public Optional<RedirectionAction> getRedirectionAction(final CallContext ctx) {
    val webContext = ctx.webContext();
    // Never reassigned, so declared with val (matching the surrounding style);
    // renamed from "computeLoginUrl" — it holds a computed value, not an action.
    val computedLoginUrl = configuration.computeFinalLoginUrl(webContext);
    val computedCallbackUrl = client.computeFinalCallbackUrl(webContext);
    // Force re-authentication if either configuration or request asks for it.
    val renew = configuration.isRenew()
        || webContext.getRequestAttribute(RedirectionActionBuilder.ATTRIBUTE_FORCE_AUTHN).isPresent();
    // Gateway mode: passive authentication without prompting the user.
    val gateway = configuration.isGateway()
        || webContext.getRequestAttribute(RedirectionActionBuilder.ATTRIBUTE_PASSIVE).isPresent();
    val redirectionUrl = constructRedirectUrl(computedLoginUrl, getServiceParameter(),
        computedCallbackUrl, renew, gateway, configuration.getMethod());
    LOGGER.debug("redirectionUrl: {}", redirectionUrl);
    return Optional.of(HttpActionHelper.buildRedirectUrlAction(webContext, redirectionUrl));
}
// Default configuration produces a 302 FoundAction pointing at the CAS login
// URL with the URL-encoded service parameter.
@Test
public void testRedirect() {
    val builder = newBuilder(new CasConfiguration());
    val action = builder.getRedirectionAction(new CallContext(MockWebContext.create(), new MockSessionStore())).get();
    assertTrue(action instanceof FoundAction);
    assertEquals(LOGIN_URL + "?service=http%3A%2F%2Fwww.pac4j.org%2Ftest.html%3Fclient_name%3DCasClient",
        ((FoundAction) action).getLocation());
}
/** Returns the configured geomap value, or {@code null} if not set. */
public String geomap() {
    return get(GEOMAP, null);
}
// Setting a geomap while sprites are already configured must be rejected
// with InvalidFieldException (the two options are mutually exclusive).
@Test(expected = InvalidFieldException.class)
public void cantSetGeoIfSpritesAreSet() {
    cfg = cfgFromJson(tmpNode(SPRITES));
    cfg.geomap("map-name");
}
public static @Nullable Duration fromCloudDuration(String duration) { Matcher matcher = DURATION_PATTERN.matcher(duration); if (!matcher.matches()) { return null; } long millis = Long.parseLong(matcher.group(1)) * 1000; String frac = matcher.group(2); if (frac != null) { long fracs = Long.parseLong(frac); if (frac.length() == 3) { // millisecond resolution millis += fracs; } else if (frac.length() == 6) { // microsecond resolution millis += fracs / 1000; } else if (frac.length() == 9) { // nanosecond resolution millis += fracs / 1000000; } else { return null; } } return Duration.millis(millis); }
// Valid second/milli/micro/nano fractions parse to millisecond precision;
// any other fraction length (or malformed input) yields null.
@Test
public void fromCloudDurationShouldParseDurationStrings() {
    assertEquals(Duration.millis(4000), fromCloudDuration("4s"));
    assertEquals(Duration.millis(4001), fromCloudDuration("4.001s"));
    assertEquals(Duration.millis(4001), fromCloudDuration("4.001000s"));
    assertEquals(Duration.millis(4001), fromCloudDuration("4.001001s"));
    assertEquals(Duration.millis(4001), fromCloudDuration("4.001000000s"));
    assertEquals(Duration.millis(4001), fromCloudDuration("4.001000001s"));
    assertNull(fromCloudDuration(""));
    assertNull(fromCloudDuration("4"));
    assertNull(fromCloudDuration("4.1"));
    assertNull(fromCloudDuration("4.1s"));
}
/**
 * Returns true iff the first criterion value is strictly greater than the
 * second (for this criterion, higher values are better).
 */
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
    return criterionValue1.isGreaterThan(criterionValue2);
}
// Higher value is better: 2.0 beats 1.5, and not vice versa.
@Test
public void betterThan() {
    AnalysisCriterion criterion = getCriterion(true);
    assertTrue(criterion.betterThan(numOf(2.0), numOf(1.5)));
    assertFalse(criterion.betterThan(numOf(1.5), numOf(2.0)));
}
/**
 * Converts the given SQL type to a {@link SqlDecimal}: decimals pass through,
 * INTEGER and BIGINT are upcast to their canonical decimal equivalents.
 *
 * @throws KsqlException if the type cannot be converted to DECIMAL
 */
public static SqlDecimal toSqlDecimal(final SqlType type) {
    switch (type.baseType()) {
        case DECIMAL:
            return (SqlDecimal) type;
        case INTEGER:
            return SqlTypes.INT_UPCAST_TO_DECIMAL;
        case BIGINT:
            return SqlTypes.BIGINT_UPCAST_TO_DECIMAL;
        default:
            throw new KsqlException(
                "Cannot convert " + type.baseType() + " to " + SqlBaseType.DECIMAL + ".");
    }
}
// BIGINT upcasts to decimal(19, 0) — wide enough for any 64-bit integer.
@Test
public void shouldConvertLongToSqlDecimal() {
    // When:
    final SqlDecimal decimal = DecimalUtil.toSqlDecimal(SqlTypes.BIGINT);

    // Then:
    assertThat(decimal, is(SqlTypes.decimal(19, 0)));
}