focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Loads a PrivateKey from the given (possibly encrypted) key file, delegating to the
// three-argument overload with its final flag fixed to true.
// NOTE(review): the meaning of that boolean is not visible in this file — presumably it
// selects an additional parsing/provider path; confirm against the 3-arg overload.
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, InvalidAlgorithmParameterException, KeyException, IOException { return toPrivateKey(keyFile, keyPassword, true); }
@Test
public void testPkcs1Des3EncryptedDsaEmptyPassword() throws Exception {
    // Decrypting a DES3-encrypted PKCS#1 DSA key with an empty password must fail with IOException.
    assertThrows(IOException.class, () ->
            SslContext.toPrivateKey(
                    new File(getClass().getResource("dsa_pkcs1_des3_encrypted.key").getFile()),
                    ""));
}
/**
 * Finds the sharding table registered under the given logic table name.
 *
 * @param logicTableName logic table name; may be null or empty
 * @return the matching sharding table, or empty when the name is blank or not registered
 */
public Optional<ShardingTable> findShardingTable(final String logicTableName) {
    if (Strings.isNullOrEmpty(logicTableName)) {
        return Optional.empty();
    }
    // Single map lookup instead of containsKey + get; also tolerates a null mapping.
    return Optional.ofNullable(shardingTables.get(logicTableName));
}
// A null logic table name must never resolve to a sharding table.
@Test void assertNotFindTableRuleWhenTableNameIsNull() { assertFalse(createMaximumShardingRule().findShardingTable(null).isPresent()); }
// Decodes ABI-encoded function return data into typed values, delegating to the
// configured decoder. The result order corresponds to outputParameters.
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) { return decoder.decodeFunctionResult(rawInput, outputParameters); }
// Decoding a nested dynamic struct: the hex fixture encodes Nuu(Foo("id", "name"))
// (offsets, then the two dynamic string payloads "id" and "name").
@Test public void testDynamicStructNestedEncode() { String rawInput = "0x0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000002" + "6964000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000004" + "6e616d6500000000000000000000000000000000000000000000000000000000"; assertEquals( FunctionReturnDecoder.decode( rawInput, AbiV2TestFixture.getNuuFunction.getOutputParameters()), Collections.singletonList( new AbiV2TestFixture.Nuu(new AbiV2TestFixture.Foo("id", "name")))); }
/**
 * Removes tracking state for every plan id in the given collection.
 * Synchronized so cleanup is atomic with respect to other tracker operations.
 */
public synchronized void cleanup(Collection<Long> removedPlanIds) {
    removedPlanIds.forEach(this::clean);
}
// Exercises plan-capacity cleanup: fills the plan tracker to capacity (ids 1-3 plus a
// workflow spawning children), verifies that further runs fail with
// ResourceExhaustedException until an existing coordinator is marked failed and can be
// evicted, and that the parent workflow status survives (then is purged) accordingly.
@Test public void testCleanup() throws Exception { SleepJobConfig jobConfig = new SleepJobConfig(1); mPlanTracker.run(jobConfig, mCommandManager, mMockJobServerContext, mWorkers, 1); jobConfig = new SleepJobConfig(1); mPlanTracker.run(jobConfig, mCommandManager, mMockJobServerContext, mWorkers, 2); jobConfig = new SleepJobConfig(1); mPlanTracker.run(jobConfig, mCommandManager, mMockJobServerContext, mWorkers, 3); doAnswer(invocation -> { PlanConfig config = invocation.getArgument(0, PlanConfig.class); long jobId = invocation.getArgument(1, Long.class); mPlanTracker.run(config, mCommandManager, mMockJobServerContext, mWorkers, jobId); return null; }).when(mMockJobMaster).run(any(PlanConfig.class), any(Long.class)); ArrayList<JobConfig> jobs = Lists.newArrayList(); SleepJobConfig child1 = new SleepJobConfig(1); SleepJobConfig child2 = new SleepJobConfig(2); jobs.add(child1); jobs.add(child2); CompositeConfig config = new CompositeConfig(jobs, false); mWorkflowTracker.run(config, 0); try { mPlanTracker.run(new SleepJobConfig(1), mCommandManager, mMockJobServerContext, mWorkers, 4); fail(); } catch (ResourceExhaustedException e) { // Should fail } mPlanTracker.coordinators().stream().filter(coordinator -> coordinator.getJobId() == 100) .findFirst().get().setJobAsFailed("TestError", "failed"); mPlanTracker.run(new SleepJobConfig(1), mCommandManager, mMockJobServerContext, mWorkers, 4); assertNotNull(mWorkflowTracker.getStatus(0, true)); try { mPlanTracker.run(new SleepJobConfig(1), mCommandManager, mMockJobServerContext, mWorkers, 5); fail(); } catch (ResourceExhaustedException e) { // Should fail } mPlanTracker.coordinators().stream().filter(coordinator -> coordinator.getJobId() == 101) .findFirst().get().setJobAsFailed("TestError", "failed"); mPlanTracker.run(new SleepJobConfig(1), mCommandManager, mMockJobServerContext, mWorkers, 5); assertNull(mWorkflowTracker.getStatus(100, true)); }
// Returns the value stored in the cost field (the objective value recorded by
// the enclosing algorithm).
public double cost() { return cost; }
// End-to-end t-SNE smoke test on MNIST: PCA to 50 dims, run t-SNE, and check the
// final cost against a known value (seeded RNG makes the run repeatable).
// The coordinate assertions are intentionally left commented out.
@Test public void test() throws Exception { System.out.println("tSNE"); MathEx.setSeed(19650218); // to get repeatable results. PCA pca = PCA.fit(MNIST.x).getProjection(50); double[][] X = pca.apply(MNIST.x); long start = System.currentTimeMillis(); TSNE tsne = new TSNE(X, 2, 20, 200, 550); long end = System.currentTimeMillis(); System.out.format("t-SNE takes %.2f seconds\n", (end - start) / 1000.0); assertEquals(1.3872256, tsne.cost(), 0.1); /* double[] coord0 = { 2.6870328, 16.8175010}; double[] coord100 = {-16.3270630, 3.6016438}; double[] coord1000 = {-16.2529939, 26.8543395}; double[] coord2000 = {-17.0491869, 4.8453648}; assertArrayEquals(coord0, tsne.coordinates[0], 1E-6); assertArrayEquals(coord100, tsne.coordinates[100], 1E-6); assertArrayEquals(coord1000, tsne.coordinates[1000], 1E-6); assertArrayEquals(coord2000, tsne.coordinates[2000], 1E-6); */ }
// Returns the port this endpoint is configured with.
public int getPort() { return port; }
@Test
public void testDefaultPortIsSet() throws Exception {
    final String uri = "couchbase:http://localhost/bucket";
    final String remaining = "http://localhost/bucket";
    final CouchbaseEndpoint endpoint =
            new CouchbaseEndpoint(uri, remaining, new CouchbaseComponent());
    // No explicit port in the URI, so the endpoint must fall back to the default.
    assertEquals(DEFAULT_COUCHBASE_PORT, endpoint.getPort());
}
// SQL CAST from DOUBLE to INTEGER using HALF_UP rounding (ties round away from
// zero: 17.5 -> 18, -17.5 -> -18). DoubleMath.roundToInt throws
// ArithmeticException for values outside the int range and for NaN/infinity;
// that is translated into INVALID_CAST_ARGUMENT so the engine reports a proper
// cast error instead of an internal failure.
@ScalarOperator(CAST) @SqlType(StandardTypes.INTEGER) public static long castToInteger(@SqlType(StandardTypes.DOUBLE) double value) { try { return DoubleMath.roundToInt(value, HALF_UP); } catch (ArithmeticException e) { throw new PrestoException(INVALID_CAST_ARGUMENT, format("Unable to cast %s to integer", value), e); } }
// Covers DOUBLE -> INTEGER casts: plain rounding, HALF_UP tie-breaking (17.5 -> 18),
// exact int-range boundaries around ±2^31 (float-adjacent values), and the failure
// cases (out-of-range magnitudes, ±infinity, NaN) which must raise INVALID_CAST_ARGUMENT.
@Test public void testCastToInteger() { assertFunction("cast(37.7E0 as integer)", INTEGER, 38); assertFunction("cast(-37.7E0 as integer)", INTEGER, -38); assertFunction("cast(17.1E0 as integer)", INTEGER, 17); assertFunction("cast(-17.1E0 as integer)", INTEGER, -17); assertFunction("cast(9.2E8 as integer)", INTEGER, 920000000); assertFunction("cast(-9.2E8 as integer)", INTEGER, -920000000); assertFunction("cast(2.21E8 as integer)", INTEGER, 221000000); assertFunction("cast(-2.21E8 as integer)", INTEGER, -221000000); assertFunction("cast(17.5E0 as integer)", INTEGER, 18); assertFunction("cast(-17.5E0 as integer)", INTEGER, -18); assertFunction("cast(" + Math.nextDown(0x1.0p31f) + " as integer)", INTEGER, (int) Math.nextDown(0x1.0p31f)); assertInvalidFunction("cast(" + 0x1.0p31 + " as integer)", INVALID_CAST_ARGUMENT); assertInvalidFunction("cast(" + Math.nextUp(0x1.0p31f) + " as integer)", INVALID_CAST_ARGUMENT); assertInvalidFunction("cast(" + Math.nextDown(-0x1.0p31f) + " as integer)", INVALID_CAST_ARGUMENT); assertFunction("cast(" + -0x1.0p31 + " as integer)", INTEGER, (int) -0x1.0p31); assertFunction("cast(" + Math.nextUp(-0x1.0p31f) + " as integer)", INTEGER, (int) Math.nextUp(-0x1.0p31f)); assertInvalidFunction("cast(9.3E9 as integer)", INVALID_CAST_ARGUMENT); assertInvalidFunction("cast(-9.3E9 as integer)", INVALID_CAST_ARGUMENT); assertInvalidFunction("cast(infinity() as integer)", INVALID_CAST_ARGUMENT); assertInvalidFunction("cast(-infinity() as integer)", INVALID_CAST_ARGUMENT); assertInvalidFunction("cast(nan() as integer)", INVALID_CAST_ARGUMENT); }
// Resolves the VALUE schema for the given topic / schema id, delegating to
// getSchema with the final flag set to false.
// NOTE(review): the boolean presumably distinguishes key vs value lookups
// (false = value); confirm against getSchema's declaration.
@Override public SchemaResult getValueSchema( final Optional<String> topicName, final Optional<Integer> schemaId, final FormatInfo expectedFormat, final SerdeFeatures serdeFeatures ) { return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false); }
@Test public void shouldPassCorrectValueWrapping() { // When: supplier.getValueSchema(Optional.of(TOPIC_NAME), Optional.empty(), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)); // Then: verify(schemaTranslator).toColumns(parsedSchema, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES), false); }
// Returns the analyzer's display name (the ANALYZER_NAME constant).
@Override public String getName() { return ANALYZER_NAME; }
@Test
public void testGetName() throws InvalidSettingException {
    // Only meaningful when the Node package/audit analyzers are enabled.
    final Settings settings = getSettings();
    Assume.assumeThat(settings.getBoolean(Settings.KEYS.ANALYZER_NODE_PACKAGE_ENABLED), is(true));
    Assume.assumeThat(settings.getBoolean(Settings.KEYS.ANALYZER_NODE_AUDIT_ENABLED), is(true));
    assertThat(analyzer.getName(), is("Node.js Package Analyzer"));
}
// Returns the access-control entry held by this binding.
public final AccessControlEntry entry() { return entry; }
@Test
public void shouldNotThrowOnUnknownResourceType() {
    // Constructing a binding for ResourceType.UNKNOWN must be permitted.
    final ResourcePattern pattern =
            new ResourcePattern(ResourceType.UNKNOWN, "foo", PatternType.LITERAL);
    new AclBinding(pattern, ACL1.entry());
}
/**
 * Builds a per-topic consumer configuration keyed by an exact topic-name matcher.
 *
 * @param topicName     topic name to match; must not be null
 * @param priorityLevel priority level to associate with the topic
 */
public static TopicConsumerConfigurationData ofTopicName(@NonNull String topicName, int priorityLevel) {
    final TopicNameMatcher.TopicName matcher = new TopicNameMatcher.TopicName(topicName);
    return of(matcher, priorityLevel);
}
@Test
public void testOfDefaultFactoryMethod() {
    final ConsumerConfigurationData<Object> conf = new ConsumerConfigurationData<>();
    conf.setPriorityLevel(1);
    final TopicConsumerConfigurationData actual =
            TopicConsumerConfigurationData.ofTopicName("foo", conf);
    // The matcher targets the given topic and the priority level is carried over.
    assertThat(actual.getTopicNameMatcher().matches("foo")).isTrue();
    assertThat(actual.getPriorityLevel()).isEqualTo(1);
}
// Singleton accessor; returns the shared ID_WORKER_UTILS instance.
public static UUIDUtils getInstance() { return ID_WORKER_UTILS; }
@Test
public void testGetInstance() {
    // The singleton accessor must never hand back null.
    assertNotNull(UUIDUtils.getInstance());
}
// Returns the configured name of this topic config.
@Override public String getName() { return name; }
@Test
public void testConstructorWithEmptyName() {
    // An empty (but non-null) name is accepted and stored verbatim.
    final ReliableTopicConfig config = new ReliableTopicConfig("");
    assertTrue(config.getName().isEmpty());
}
/**
 * Asks the broker at {@code address} to reset consumer offsets and returns the
 * resulting offset table.
 *
 * <p>Bug fix: the original only registered a success callback, so a transport
 * failure (or an exception thrown while decoding the response body) left the
 * returned future incomplete forever. All failure paths now complete the
 * future exceptionally.
 *
 * @param address       broker address
 * @param requestHeader reset-offset request header
 * @param timeoutMillis RPC timeout in milliseconds
 * @return future completing with the per-queue offset table, or exceptionally on failure
 */
@Override
public CompletableFuture<Map<MessageQueue, Long>> invokeBrokerToResetOffset(String address, ResetOffsetRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<Map<MessageQueue, Long>> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.INVOKE_BROKER_TO_RESET_OFFSET, requestHeader);
    remotingClient.invoke(address, request, timeoutMillis).whenComplete((response, throwable) -> {
        if (throwable != null) {
            // Transport-level failure: propagate instead of leaving callers hanging.
            log.warn("invokeBrokerToResetOffset failed, address={}, header={}", address, requestHeader, throwable);
            future.completeExceptionally(throwable);
            return;
        }
        try {
            if (response.getCode() == ResponseCode.SUCCESS && null != response.getBody()) {
                Map<MessageQueue, Long> offsetTable = ResetOffsetBody.decode(response.getBody(), ResetOffsetBody.class).getOffsetTable();
                future.complete(offsetTable);
                log.info("Invoke broker to reset offset success. address:{}, header:{}, offsetTable:{}", address, requestHeader, offsetTable);
            } else {
                log.warn("invokeBrokerToResetOffset getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader);
                future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
            }
        } catch (Throwable t) {
            // Decoding errors must fail the future rather than be swallowed by the stage.
            future.completeExceptionally(t);
        }
    });
    return future;
}
// A SUCCESS response with an (empty) ResetOffsetBody must complete the future
// with an empty offset table.
@Test public void assertInvokeBrokerToResetOffsetWithSuccess() throws Exception { ResetOffsetBody responseBody = new ResetOffsetBody(); setResponseSuccess(RemotingSerializable.encode(responseBody)); ResetOffsetRequestHeader requestHeader = mock(ResetOffsetRequestHeader.class); CompletableFuture<Map<MessageQueue, Long>> actual = mqClientAdminImpl.invokeBrokerToResetOffset(defaultBrokerAddr, requestHeader, defaultTimeout); assertEquals(0, actual.get().size()); }
// Wraps an AggregatingState so accesses are latency-tracked; the metrics object
// is built from the state name plus the sampling/history/naming settings taken
// from the latency-tracking config.
LatencyTrackingAggregatingState( String stateName, InternalAggregatingState<K, N, IN, ACC, OUT> original, LatencyTrackingStateConfig latencyTrackingStateConfig) { super( original, new AggregatingStateLatencyMetrics( stateName, latencyTrackingStateConfig.getMetricGroup(), latencyTrackingStateConfig.getSampleInterval(), latencyTrackingStateConfig.getHistorySize(), latencyTrackingStateConfig.isStateNameAsVariable())); }
// Verifies the latency-tracking wrapper samples add/get/mergeNamespaces counters:
// counters start at zero, increment per call, and reset when the sample interval
// is reached (hence expectedResult flips to 0 on the last iteration).
@Test @SuppressWarnings({"unchecked", "rawtypes"}) void testLatencyTrackingAggregatingState() throws Exception { AbstractKeyedStateBackend<Integer> keyedBackend = createKeyedBackend(getKeySerializer()); try { LatencyTrackingAggregatingState<Integer, VoidNamespace, Long, Long, Long> latencyTrackingState = (LatencyTrackingAggregatingState) createLatencyTrackingState(keyedBackend, getStateDescriptor()); latencyTrackingState.setCurrentNamespace(VoidNamespace.INSTANCE); LatencyTrackingAggregatingState.AggregatingStateLatencyMetrics latencyTrackingStateMetric = latencyTrackingState.getLatencyTrackingStateMetric(); assertThat(latencyTrackingStateMetric.getAddCount()).isZero(); assertThat(latencyTrackingStateMetric.getGetCount()).isZero(); assertThat(latencyTrackingStateMetric.getMergeNamespaceCount()).isZero(); setCurrentKey(keyedBackend); ThreadLocalRandom random = ThreadLocalRandom.current(); for (int index = 1; index <= SAMPLE_INTERVAL; index++) { int expectedResult = index == SAMPLE_INTERVAL ? 0 : index; latencyTrackingState.add(random.nextLong()); assertThat(latencyTrackingStateMetric.getAddCount()).isEqualTo(expectedResult); latencyTrackingState.get(); assertThat(latencyTrackingStateMetric.getGetCount()).isEqualTo(expectedResult); latencyTrackingState.mergeNamespaces( VoidNamespace.INSTANCE, Collections.emptyList()); assertThat(latencyTrackingStateMetric.getMergeNamespaceCount()) .isEqualTo(expectedResult); } } finally { if (keyedBackend != null) { keyedBackend.close(); keyedBackend.dispose(); } } }
// Answers an authentication-validation request: runs authenticate() and writes a
// small JSON body {"valid": <boolean>} with the outcome. Note this filter always
// responds itself — filterChain is intentionally never invoked here.
@Override public void doFilter(HttpRequest request, HttpResponse response, FilterChain filterChain) throws IOException { boolean isAuthenticated = authenticate(request, response); response.setContentType(MediaTypes.JSON); try (JsonWriter jsonWriter = JsonWriter.of(response.getWriter())) { jsonWriter.beginObject(); jsonWriter.prop("valid", isAuthenticated); jsonWriter.endObject(); } }
// With sonar.forceAuthentication enabled and no credentials on the request, the
// validation endpoint must report {"valid": false}.
@Test public void doFilter_whenForceAuthentication_shouldReturnFalse() throws Exception { settings.setProperty("sonar.forceAuthentication", "true"); underTest.doFilter(request, response, chain); verifyResponseIsFalse(); }
// Synchronous put with a per-entry TTL: delegates to putAsync and blocks on the
// returned future. NOTE(review): presumed to return the previous value mapped to
// the key (java.util.Map.put convention) — confirm against putAsync's contract.
@Override public V put(K key, V value, Duration ttl) { return get(putAsync(key, value, ttl)); }
@Test
public void testSizeInMemory() {
    RMapCacheNative<Integer, Integer> map = redisson.getMapCacheNative("test");
    // Populate ten entries, each expiring in five seconds.
    for (int key = 0; key < 10; key++) {
        map.put(key, key, Duration.ofSeconds(5));
    }
    assertThat(map.sizeInMemory()).isGreaterThanOrEqualTo(272);
}
/**
 * Looks up slobrok entries for the application identified by the {@code instanceId}
 * path parameter, filtered by the optional {@code pattern} query parameter
 * (defaulting to {@code DEFAULT_SLOBROK_PATTERN}).
 */
private List<SlobrokEntryResponse> getSlobrokEntries(RestApi.RequestContext context) {
    String instanceId = context.pathParameters().getStringOrThrow("instanceId");
    // Apply the default straight on the Optional instead of via an intermediate null.
    String pattern = context.queryParameters().getString("pattern")
            .orElse(DEFAULT_SLOBROK_PATTERN);
    ApplicationInstanceReference reference = parseInstanceId(instanceId);
    ApplicationId applicationId = OrchestratorUtil.toApplicationId(reference);
    List<Mirror.Entry> entries = slobrokApi.lookup(applicationId, pattern);
    return entries.stream()
            .map(entry -> new SlobrokEntryResponse(entry.getName(), entry.getSpecString()))
            .toList();
}
@Test
void testGetSlobrokEntries() throws Exception {
    // Pattern and expected value are the same literal here.
    final String pattern = "foo";
    testGetSlobrokEntriesWith(pattern, pattern);
}
// Extracts this path's value from the given Struct, delegating to the two-argument
// overload with its flag set to true.
// NOTE(review): the boolean's meaning is not visible here — confirm against the
// two-argument valueFrom before documenting it further.
public Object valueFrom(Struct struct) { return valueFrom(struct, true); }
@Test
void shouldFindValueInMap() {
    // Nested map fixture: {"foo": {"bar": 42, "baz": null}}
    final Map<String, Object> inner = new HashMap<>();
    inner.put("bar", 42);
    inner.put("baz", null);
    final Map<String, Object> outer = new HashMap<>();
    outer.put("foo", inner);
    assertEquals(42, pathV2("foo.bar").valueFrom(outer));
    // A key explicitly mapped to null yields null.
    assertNull(pathV2("foo.baz").valueFrom(outer));
}
// Validates the parsed command line before a terminal action runs:
//  1. every option that declares arguments must actually have received them,
//  2. a TerminalAction must be derivable from the input,
//  3. when --existing is present, its value must name a TransferAction and be
//     valid for the resolved action's transfer type,
//  4. the action's own argument (URI) must itself validate.
// Returns false (after printing a console diagnostic) on the first violation.
public boolean validate(final CommandLine input) { for(Option o : input.getOptions()) { if(Option.UNINITIALIZED == o.getArgs()) { continue; } if(o.hasOptionalArg()) { continue; } if(o.getArgs() != o.getValuesList().size()) { console.printf("Missing argument for option %s%n", o.getLongOpt()); return false; } } final TerminalAction action = TerminalActionFinder.get(input); if(null == action) { console.printf("%s%n", "Missing argument"); return false; } if(input.hasOption(TerminalOptionsBuilder.Params.existing.name())) { final String arg = input.getOptionValue(TerminalOptionsBuilder.Params.existing.name()); if(null == TransferAction.forName(arg)) { final Set<TransferAction> actions = new HashSet<TransferAction>(TransferAction.forTransfer(Transfer.Type.download)); actions.add(TransferAction.cancel); console.printf("Invalid argument '%s' for option %s. Must be one of %s%n", arg, TerminalOptionsBuilder.Params.existing.name(), Arrays.toString(actions.toArray())); return false; } switch(action) { case download: if(!validate(arg, Transfer.Type.download)) { return false; } break; case upload: if(!validate(arg, Transfer.Type.upload)) { return false; } break; case synchronize: if(!validate(arg, Transfer.Type.sync)) { return false; } break; case copy: if(!validate(arg, Transfer.Type.copy)) { return false; } break; } } // Validate arguments switch(action) { case list: case download: if(!validate(input.getOptionValue(action.name()))) { return false; } break; case upload: case copy: case synchronize: if(!validate(input.getOptionValue(action.name()))) { return false; } break; } return true; }
// A URI using a scheme contributed by an installed connection profile (rackspace://)
// must validate, including percent-encoded path characters.
@Test public void testValidateProfile() throws Exception { final Set<Protocol> list = new HashSet<>(Arrays.asList( new SwiftProtocol(), new ProfilePlistReader(new ProtocolFactory(Collections.singleton(new SwiftProtocol()))) .read(this.getClass().getResourceAsStream("/Rackspace US.cyberduckprofile")) )); assertTrue(new TerminalOptionsInputValidator(new ProtocolFactory(list)).validate("rackspace://cdn.duck.sh/%%~nc")); }
// Reports whether clone group `first` is contained in clone group `second`.
// A group with a longer clone-unit length can never be contained in a shorter one,
// so that case exits early. The first SortedListsUtils.contains pass checks that
// every part of `first` is covered by some part of `second` (interval-aware
// comparator); the second pass additionally requires every part of `second` to
// have a same-resource counterpart in `first`.
static boolean containsIn(CloneGroup first, CloneGroup second) { if (first.getCloneUnitLength() > second.getCloneUnitLength()) { return false; } List<ClonePart> firstParts = first.getCloneParts(); List<ClonePart> secondParts = second.getCloneParts(); return SortedListsUtils.contains(secondParts, firstParts, new ContainsInComparator(second.getCloneUnitLength(), first.getCloneUnitLength())) && SortedListsUtils.contains(firstParts, secondParts, ContainsInComparator.RESOURCE_ID_COMPARATOR); }
// C2 has one long part per resource (length 3) spanning both of C1's short parts
// (length 1) there, so C1 is contained in C2 but not vice versa.
@Test public void one_part_of_C2_covers_two_parts_of_C1() { // Note that line numbers don't matter for method which we test. CloneGroup c1 = newCloneGroup(1, newClonePart("a", 0), newClonePart("a", 2), newClonePart("b", 0), newClonePart("b", 2)); CloneGroup c2 = newCloneGroup(3, newClonePart("a", 0), newClonePart("b", 0)); assertThat(Filter.containsIn(c1, c2), is(true)); assertThat(Filter.containsIn(c2, c1), is(false)); }
// Returns a new Frequency equal to this frequency minus the given one.
public Frequency subtract(Frequency value) { return new Frequency(this.frequency - value.frequency); }
@Test
public void testSubtract() {
    final Frequency minuend = Frequency.ofGHz(1);
    final Frequency subtrahend = Frequency.ofMHz(100);
    // 1 GHz - 100 MHz = 900 MHz
    assertThat(minuend.subtract(subtrahend), is(Frequency.ofMHz(900)));
}
// Returns the element at `index` from an arbitrary container-like object:
// Map (the index-th entry via its entry-set iterator), List, Object[],
// Iterator/Enumeration (consumed up to the index), Collection, or any array type
// via java.lang.reflect.Array. Negative indexes and exhausted iterators raise
// IndexOutOfBoundsException; null and unsupported types raise
// IllegalArgumentException.
// NOTE(review): results are iteration-order dependent for Map/Set inputs, so
// HashMap/HashSet lookups are effectively unspecified by contract.
public static Object get(Object object, int index) { if (index < 0) { throw new IndexOutOfBoundsException("Index cannot be negative: " + index); } if (object instanceof Map) { Map map = (Map) object; Iterator iterator = map.entrySet().iterator(); return get(iterator, index); } else if (object instanceof List) { return ((List) object).get(index); } else if (object instanceof Object[]) { return ((Object[]) object)[index]; } else if (object instanceof Iterator) { Iterator it = (Iterator) object; while (it.hasNext()) { index--; if (index == -1) { return it.next(); } else { it.next(); } } throw new IndexOutOfBoundsException("Entry does not exist: " + index); } else if (object instanceof Collection) { Iterator iterator = ((Collection) object).iterator(); return get(iterator, index); } else if (object instanceof Enumeration) { Enumeration it = (Enumeration) object; while (it.hasMoreElements()) { index--; if (index == -1) { return it.nextElement(); } else { it.nextElement(); } } throw new IndexOutOfBoundsException("Entry does not exist: " + index); } else if (object == null) { throw new IllegalArgumentException("Unsupported object type: null"); } else { try { return Array.get(object, index); } catch (IllegalArgumentException ex) { throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName()); } } }
@Test
void testGetMap2() {
    final Map<String, String> map = new HashMap<>();
    map.put("key1", "value1");
    // Fix: the original chained both calls inside one assertThrows lambda, which
    // made the second get() unreachable. Assert each failing index separately.
    assertThrows(IndexOutOfBoundsException.class, () -> CollectionUtils.get(map, -1));
    assertThrows(IndexOutOfBoundsException.class, () -> CollectionUtils.get(map, 1));
}
/**
 * Returns the arithmetic mean of the given durations, or 0.0 for an empty list.
 */
Double calculateAverage(List<Double> durationEntries) {
    // Guard the empty case explicitly instead of testing sum == 0, which conflated
    // "no entries" with "entries summing to zero" (same result, clearer intent,
    // and no reliance on 0/0 avoidance by accident).
    if (durationEntries.isEmpty()) {
        return 0.0;
    }
    double sum = 0.0;
    for (Double duration : durationEntries) {
        sum += duration;
    }
    return sum / durationEntries.size();
}
@Test
void calculateAverageFromList() {
    final OutputStream out = new ByteArrayOutputStream();
    final UsageFormatter formatter = new UsageFormatter(out);
    // mean of 1, 2, 3 is 2
    assertThat(formatter.calculateAverage(asList(1.0, 2.0, 3.0)), is(closeTo(2.0, EPSILON)));
}
// Chat-message hook that keeps tracked item charges in sync. For GAMEMESSAGE/SPAM
// messages it strips formatting tags, matches the text against per-item regex
// patterns (dodgy necklace, binding necklace, ring of forging, amulets of
// chemistry/bounty, chronicle, slayer bracelets, blood essence, bracelet of clay),
// and on a match updates the stored charge count and/or fires the user-configured
// break notification. Branch order matters: several "break" messages arrive before
// the corresponding "used" message, so those reset to max+1 to stay in sync (see
// the existing inline comments).
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM) { String message = Text.removeTags(event.getMessage()); Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message); Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message); Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message); Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message); Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message); Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message); Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message); Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message); Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message); Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message); Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message); Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message); Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message); Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message); Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message); Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message); Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message); Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message); Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message); Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message); if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE)) { notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered"); } 
else if (dodgyBreakMatcher.find()) { notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust."); updateDodgyNecklaceCharges(MAX_DODGY_CHARGES); } else if (dodgyCheckMatcher.find()) { updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1))); } else if (dodgyProtectMatcher.find()) { updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1))); } else if (amuletOfChemistryCheckMatcher.find()) { updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1))); } else if (amuletOfChemistryUsedMatcher.find()) { final String match = amuletOfChemistryUsedMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateAmuletOfChemistryCharges(charges); } else if (amuletOfChemistryBreakMatcher.find()) { notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust."); updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES); } else if (amuletOfBountyCheckMatcher.find()) { updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1))); } else if (amuletOfBountyUsedMatcher.find()) { updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1))); } else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT)) { updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES); } else if (message.contains(BINDING_BREAK_TEXT)) { notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT); // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1); } else if (bindingNecklaceUsedMatcher.find()) { final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); if (equipment.contains(ItemID.BINDING_NECKLACE)) { updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1); } } else if (bindingNecklaceCheckMatcher.find()) { final String match = 
bindingNecklaceCheckMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateBindingNecklaceCharges(charges); } else if (ringOfForgingCheckMatcher.find()) { final String match = ringOfForgingCheckMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateRingOfForgingCharges(charges); } else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY)) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); // Determine if the player smelted with a Ring of Forging equipped. if (equipment == null) { return; } if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1)) { int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES); updateRingOfForgingCharges(charges); } } else if (message.equals(RING_OF_FORGING_BREAK_TEXT)) { notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted."); // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1); } else if (chronicleAddMatcher.find()) { final String match = chronicleAddMatcher.group(1); if (match.equals("one")) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1); } else { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match)); } } else if (chronicleUseAndCheckMatcher.find()) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1))); } else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT)) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1); } else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT)) { 
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0); } else if (message.equals(CHRONICLE_FULL_TEXT)) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000); } else if (slaughterActivateMatcher.find()) { final String found = slaughterActivateMatcher.group(1); if (found == null) { updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES); notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT); } else { updateBraceletOfSlaughterCharges(Integer.parseInt(found)); } } else if (slaughterCheckMatcher.find()) { updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1))); } else if (expeditiousActivateMatcher.find()) { final String found = expeditiousActivateMatcher.group(1); if (found == null) { updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES); notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT); } else { updateExpeditiousBraceletCharges(Integer.parseInt(found)); } } else if (expeditiousCheckMatcher.find()) { updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1))); } else if (bloodEssenceCheckMatcher.find()) { updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1))); } else if (bloodEssenceExtractMatcher.find()) { updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1))); } else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT)) { updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES); } else if (braceletOfClayCheckMatcher.find()) { updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1))); } else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN)) { final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); // Determine if the player mined with a Bracelet of Clay equipped. 
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY)) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); // Charge is not used if only 1 inventory slot is available when mining in Prifddinas boolean ignore = inventory != null && inventory.count() == 27 && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN); if (!ignore) { int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES); updateBraceletOfClayCharges(charges); } } } else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT)) { notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust"); updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES); } } }
// A blood-essence "check" chat message must persist the parsed charge count (56)
// to the RS profile configuration.
@Test public void testBloodEssenceCheck() { ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHECK_BLOOD_ESSENCE, "", 0); itemChargePlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_BLOOD_ESSENCE, 56); }
/**
 * Dispatches an incoming remoting request by request code. Unknown codes produce
 * no response (null), matching the previous default-break behavior.
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    switch (request.getCode()) {
        case RequestCode.HEART_BEAT:
            return heartBeat(ctx, request);
        case RequestCode.UNREGISTER_CLIENT:
            return unregisterClient(ctx, request);
        case RequestCode.CHECK_CLIENT_CONFIG:
            return checkClientConfig(ctx, request);
        default:
            return null;
    }
}
// Unregistering a consumer must succeed (SUCCESS response) and remove its group
// info from the consumer manager.
@Test public void processRequest_UnRegisterConsumer() throws RemotingCommandException { ConsumerGroupInfo consumerGroupInfo = brokerController.getConsumerManager().getConsumerGroupInfo(group); assertThat(consumerGroupInfo).isNotNull(); RemotingCommand request = createUnRegisterConsumerCommand(); RemotingCommand response = clientManageProcessor.processRequest(handlerContext, request); assertThat(response).isNotNull(); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); consumerGroupInfo = brokerController.getConsumerManager().getConsumerGroupInfo(group); assertThat(consumerGroupInfo).isNull(); }
// Returns the keys tracked by the backing map.
// NOTE(review): presumably the map's live key-set view (per java.util.Map), not a
// snapshot — confirm the field's declared type before relying on that.
public Set<T> keySet() { return map.keySet(); }
@Test
public void testKeySet() {
    counter.add("key1", 1);
    counter.add("key2", 1);
    // Both keys must be present, order irrelevant.
    final Set<String> expected = new HashSet<>(asList("key1", "key2"));
    assertEquals(expected, counter.keySet());
}
// Returns the submitting user; may be null (see @CheckForNull).
@CheckForNull public User getSubmitter() { return submitter; }
@Test
public void empty_in_submitterLogin_is_considered_as_null() {
    final CeTask.User submitter = new CeTask.User("USER_ID", "");
    final CeTask ceTask = underTest.setUuid("uuid").setType("type")
        .setSubmitter(submitter)
        .build();
    // An empty login string must surface as null.
    assertThat(ceTask.getSubmitter().login()).isNull();
}
/**
 * Submits the runnable to the pool and returns a future that completes (with a
 * null result) once the runnable has run.
 *
 * @param r task to execute
 * @return future completing with null when {@code r} finishes
 */
public Future<Void> executeRunnable(final Runnable r) {
    // ExecutorService.submit(Runnable, T) already yields a correctly typed
    // Future<Void> whose get() returns the supplied result (null here), so the
    // previous unchecked cast and @SuppressWarnings are unnecessary.
    return executor.submit(r, null);
}
@Test
public void testRunnableSucceeds() throws Exception {
    final ExecutorServiceFuturePool futurePool = new ExecutorServiceFuturePool(executorService);
    final AtomicBoolean flag = new AtomicBoolean(false);
    // The future must complete only after the runnable has executed.
    final Future<Void> future = futurePool.executeRunnable(() -> flag.set(true));
    future.get(30, TimeUnit.SECONDS);
    assertTrue("atomicBoolean set to true?", flag.get());
}
// Resolves `path` against `workingDirectory`, honoring the given link options.
// The recursive overload signals "an intermediate element was missing or not a
// directory" by returning null; that case is surfaced here as the standard
// java.nio NoSuchFileException.
public DirectoryEntry lookUp( File workingDirectory, JimfsPath path, Set<? super LinkOption> options) throws IOException { checkNotNull(path); checkNotNull(options); DirectoryEntry result = lookUp(workingDirectory, path, options, 0); if (result == null) { // an intermediate file in the path did not exist or was not a directory throw new NoSuchFileException(path.toString()); } return result; }
// ".." after traversing a symlinked directory resolves against the link target's
// actual parent (hence "four/six/.." lands in "work"), per the expected entries.
@Test public void testLookup_relative_withDotDotsInPath_afterSymlink() throws IOException { assertExists(lookup("four/five/.."), "/", "/"); assertExists(lookup("four/six/.."), "/", "work"); }
// Adds volume mounts for the given trusted-certificate secrets, delegating to the
// overload that also accepts a mount-name prefix (null here = no prefix; see the
// prefixed behavior exercised by the corresponding test).
public static void createTrustedCertificatesVolumeMounts(List<VolumeMount> volumeMountList, List<CertSecretSource> trustedCertificates, String tlsVolumeMountPath) { createTrustedCertificatesVolumeMounts(volumeMountList, trustedCertificates, tlsVolumeMountPath, null); }
@Test
public void testTrustedCertificatesVolumeMountsWithPrefix() {
    CertSecretSource cert1 = new CertSecretSourceBuilder()
        .withSecretName("first-certificate")
        .withCertificate("ca.crt")
        .build();
    CertSecretSource cert2 = new CertSecretSourceBuilder()
        .withSecretName("second-certificate")
        .withCertificate("tls.crt")
        .build();
    // Same secret as cert1 but a different certificate file — must not create a duplicate mount.
    CertSecretSource cert3 = new CertSecretSourceBuilder()
        .withSecretName("first-certificate")
        .withCertificate("ca2.crt")
        .build();
    List<VolumeMount> mounts = new ArrayList<>();
    CertUtils.createTrustedCertificatesVolumeMounts(mounts, List.of(cert1, cert2, cert3), "/my/path/", "prefixed");
    // One mount per distinct secret; names get the prefix, mount paths do not.
    assertThat(mounts.size(), is(2));
    assertThat(mounts.get(0).getName(), is("prefixed-first-certificate"));
    assertThat(mounts.get(0).getMountPath(), is("/my/path/first-certificate"));
    assertThat(mounts.get(1).getName(), is("prefixed-second-certificate"));
    assertThat(mounts.get(1).getMountPath(), is("/my/path/second-certificate"));
}
/**
 * Removes all ownership grants targeting the event notification with the given id.
 */
public void unregisterEventNotification(String id) {
    removeGrantsForTarget(grnRegistry.newGRN(GRNTypes.EVENT_NOTIFICATION, id));
}
@Test
void unregisterEventNotification() {
    entityOwnershipService.unregisterEventNotification("1234");
    // Verifies that a grant-removal was issued for the notification's GRN.
    assertGrantRemoval(GRNTypes.EVENT_NOTIFICATION, "1234");
}
/**
 * Runs the CLI command, ensuring the YARN client and the web service client
 * are released afterwards regardless of how the command terminates.
 */
@Override
public int run(String[] args) throws Exception {
    try {
        webServiceClient = WebServiceClient.getWebServiceClient().createClient();
        return runCommand(args);
    } finally {
        // Null checks: either resource may not have been created if an earlier step threw.
        if (yarnClient != null) {
            yarnClient.close();
        }
        if (webServiceClient != null) {
            webServiceClient.destroy();
        }
    }
}
@Test (timeout = 15000)
public void testFetchApplictionLogsAsAnotherUser() throws Exception {
    String remoteLogRootDir = "target/logs/";
    String rootLogDir = "target/LocalLogs";
    String testUser = "test";
    UserGroupInformation testUgi = UserGroupInformation
        .createRemoteUser(testUser);
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    conf
        .set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
    FileSystem fs = FileSystem.get(conf);
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId appAttemptId =
        ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId
        .newContainerId(appAttemptId, 1);
    NodeId nodeId = NodeId.newInstance("localhost", 1234);
    try {
        Path rootLogDirPath = new Path(rootLogDir);
        if (fs.exists(rootLogDirPath)) {
            fs.delete(rootLogDirPath, true);
        }
        assertTrue(fs.mkdirs(rootLogDirPath));
        // create local app dir for app
        final Path appLogsDir = new Path(rootLogDirPath, appId.toString());
        if (fs.exists(appLogsDir)) {
            fs.delete(appLogsDir, true);
        }
        assertTrue(fs.mkdirs(appLogsDir));
        List<String> rootLogDirs = Arrays.asList(rootLogDir);
        List<String> logTypes = new ArrayList<String>();
        logTypes.add("syslog");
        // create container logs in localLogDir for app
        createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes,
            Collections.emptyList());
        // create the remote app dir for app but for a different user testUser
        Path path = new Path(remoteLogRootDir + testUser
            + "/bucket-logs-tfile/0001/" + appId);
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
        assertTrue(fs.mkdirs(path));
        // upload container logs for app into remote dir
        uploadContainerLogIntoRemoteDir(testUgi, conf, rootLogDirs, nodeId,
            containerId, path, fs);
        YarnClient mockYarnClient = createMockYarnClient(
            YarnApplicationState.FINISHED, testUgi.getShortUserName());
        LogsCLI cli = new LogsCLIForTest(mockYarnClient);
        cli.setConf(conf);
        // Verify that we can get the application logs by specifying
        // a correct appOwner
        int exitCode = cli.run(new String[] {
            "-applicationId", appId.toString(),
            "-appOwner", testUser});
        assertTrue(exitCode == 0);
        assertTrue(sysOutStream.toString().contains(
            logMessage(containerId, "syslog")));
        sysOutStream.reset();
        // Verify that we can not get the application logs
        // if an invalid user is specified
        exitCode = cli.run(new String[] {
            "-applicationId", appId.toString(),
            "-appOwner", "invalid"});
        assertTrue(exitCode == -1);
        assertTrue(sysErrStream.toString().contains("Can not find the logs "
            + "for the application: " + appId.toString()));
        sysErrStream.reset();
        // Verify that we do not specify appOwner, and can not
        // get appReport from RM, we still can figure out the appOwner
        // and can get app logs successfully.
        YarnClient mockYarnClient2 = createMockYarnClientUnknownApp();
        cli = new LogsCLIForTest(mockYarnClient2);
        cli.setConf(conf);
        exitCode = cli.run(new String[] {
            "-applicationId", appId.toString()});
        assertTrue(exitCode == 0);
        assertTrue(sysOutStream.toString().contains(
            logMessage(containerId, "syslog")));
        sysOutStream.reset();
        // Verify appOwner guessed correctly with older log dir dtructure
        path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
        assertTrue(fs.mkdirs(path));
        uploadContainerLogIntoRemoteDir(testUgi, conf, rootLogDirs, nodeId,
            containerId, path, fs);
        exitCode = cli.run(new String[] {
            "-applicationId", appId.toString()});
        assertTrue(exitCode == 0);
        assertTrue(sysOutStream.toString().contains(
            logMessage(containerId, "syslog")));
        sysOutStream.reset();
        // Verify that we could get the err message "Can not find the appOwner"
        // if we do not specify the appOwner, can not get appReport, and
        // the app does not exist in remote dir.
        ApplicationId appId2 = ApplicationId.newInstance(
            System.currentTimeMillis(), 2);
        exitCode = cli.run(new String[] {
            "-applicationId", appId2.toString()});
        assertTrue(exitCode == -1);
        assertTrue(sysErrStream.toString().contains(
            "Can not find the appOwner"));
        sysErrStream.reset();
        // Verify that we could not get appOwner
        // because we don't have file-system permissions
        ApplicationId appTest = ApplicationId.newInstance(
            System.currentTimeMillis(), 1000);
        String priorityUser = "priority";
        Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser
            + "/bucket-logs-tfile/1000/" + appTest);
        if (fs.exists(pathWithoutPerm)) {
            fs.delete(pathWithoutPerm, true);
        }
        // The user will not have read permission for this directory.
        // To mimic the scenario that the user can not get file status
        FsPermission permission = FsPermission
            .createImmutable((short) 01300);
        assertTrue(fs.mkdirs(pathWithoutPerm, permission));
        exitCode = cli.run(new String[] {
            "-applicationId", appTest.toString()});
        assertTrue(exitCode == -1);
        assertTrue(sysErrStream.toString().contains(
            "Can not find the logs for the application: "
                + appTest.toString()));
        sysErrStream.reset();
    } finally {
        // Clean up local and remote log directories created by this test.
        fs.delete(new Path(remoteLogRootDir), true);
        fs.delete(new Path(rootLogDir), true);
    }
}
/**
 * Encodes the transaction (using the dedicated EIP-4844 encoding when the
 * transaction type is a blob transaction), signs the encoded bytes with the
 * given credentials, and returns the signed, re-encoded transaction.
 */
public static byte[] signMessage(RawTransaction rawTransaction, Credentials credentials) {
    byte[] encodedTransaction;
    if (rawTransaction.getTransaction().getType().isEip4844()) {
        encodedTransaction = encode4844(rawTransaction);
    } else {
        encodedTransaction = encode(rawTransaction);
    }
    Sign.SignatureData signatureData =
        Sign.signMessage(encodedTransaction, credentials.getEcKeyPair());
    return encode(rawTransaction, signatureData);
}
@Test
public void testEip155TransactionWithLargeChainId() {
    // EIP-155 signing must handle a chain id at the upper bound of long.
    assertArrayEquals(
        TransactionEncoder.signMessage(
            createEip155RawTransaction(), Long.MAX_VALUE, SampleKeys.CREDENTIALS_ETH_EXAMPLE),
        (Numeric.hexStringToByteArray(
            "0xf875098504a817c800825208943535353535353535353535353535353535353535880de0b6b3a76400008089010000000000000021a0ed14bd16ddd7788623f4439db3ddbc8bf548c241c3af87819c187a638ef40e17a03b4972ee3adb77b6b06784d12fe098c2cb84c03afd79d17b1caf8f63483101f0")));
}
/**
 * Checks whether the current invocation passes the given parameter flow rule.
 * Passes trivially (returns true) when the argument array is absent, the
 * rule's parameter index is out of range, or the resolved parameter value is
 * null. Otherwise delegates to the cluster-mode check (for QPS rules in
 * cluster mode) or the local check.
 */
public static boolean passCheck(ResourceWrapper resourceWrapper, /*@Valid*/ ParamFlowRule rule, /*@Valid*/ int count, Object... args) {
    if (args == null) {
        return true;
    }
    int paramIdx = rule.getParamIdx();
    if (args.length <= paramIdx) {
        return true;
    }
    // Get parameter value.
    Object value = args[paramIdx];
    // Assign value with the result of paramFlowKey method
    if (value instanceof ParamFlowArgument) {
        value = ((ParamFlowArgument) value).paramFlowKey();
    }
    // If value is null, then pass
    if (value == null) {
        return true;
    }
    if (rule.isClusterMode() && rule.getGrade() == RuleConstant.FLOW_GRADE_QPS) {
        return passClusterCheck(resourceWrapper, rule, count, value);
    }
    return passLocalCheck(resourceWrapper, rule, count, value);
}
@Test
public void testPassLocalCheckForArray() throws InterruptedException {
    final String resourceName = "testPassLocalCheckForArray";
    final ResourceWrapper resourceWrapper = new StringResourceWrapper(resourceName, EntryType.IN);
    int paramIdx = 0;
    double globalThreshold = 1;
    // Rate-limiter rule with a threshold of 1 on the first parameter.
    ParamFlowRule rule = new ParamFlowRule(resourceName).setParamIdx(paramIdx)
        .setControlBehavior(RuleConstant.CONTROL_BEHAVIOR_RATE_LIMITER).setCount(globalThreshold);
    TimeUtil.currentTimeMillis();
    String v1 = "a", v2 = "B", v3 = "Cc";
    Object arr = new String[]{v1, v2, v3};
    ParameterMetric metric = new ParameterMetric();
    ParameterMetricStorage.getMetricsMap().put(resourceWrapper.getName(), metric);
    metric.getRuleTimeCounterMap().put(rule, new ConcurrentLinkedHashMapWrapper<Object, AtomicLong>(4000));
    // First call passes; an immediate second call with the same array must be rejected.
    assertTrue(ParamFlowChecker.passCheck(resourceWrapper, rule, 1, arr));
    assertFalse(ParamFlowChecker.passCheck(resourceWrapper, rule, 1, arr));
}
@Override
protected double maintain() {
    expireReserved();
    // Average of the two sub-task results — presumably their success ratios;
    // TODO confirm against the maintainer base-class contract.
    return (deprovisionRemovable() + pruneReals()) / 2;
}
@Test
public void expire_inactive() {
    LoadBalancerExpirer expirer = new LoadBalancerExpirer(tester.nodeRepository(),
        Duration.ofDays(1),
        tester.loadBalancerService(),
        new TestMetric());
    Supplier<Map<LoadBalancerId, LoadBalancer>> loadBalancers =
        () -> tester.nodeRepository().database().readLoadBalancers((ignored) -> true);

    // Deploy two applications with a total of three load balancers
    ClusterSpec.Id cluster1 = ClusterSpec.Id.from("qrs");
    ClusterSpec.Id cluster2 = ClusterSpec.Id.from("qrs2");
    ApplicationId app1 = ProvisioningTester.applicationId();
    ApplicationId app2 = ProvisioningTester.applicationId();
    LoadBalancerId lb1 = new LoadBalancerId(app1, cluster1);
    LoadBalancerId lb2 = new LoadBalancerId(app2, cluster1);
    LoadBalancerId lb3 = new LoadBalancerId(app2, cluster2);
    deployApplication(app1, cluster1);
    deployApplication(app2, cluster1, cluster2);
    assertEquals(3, loadBalancers.get().size());

    // Remove one application deactivates load balancers for that application
    tester.remove(app1);
    assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb1).state());
    assertNotSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb2).state());

    // Expirer defers removal while nodes are still allocated to application
    tester.move(Node.State.ready, tester.nodeRepository().nodes().list(Node.State.dirty).asList());
    expirer.maintain();
    assertEquals(Set.of(), tester.loadBalancerService().instances().get(lb1).reals());
    assertEquals(Set.of(), loadBalancers.get().get(lb1).instance().get().reals());

    // Expirer defers removal of load balancer until expiration time passes
    expirer.maintain();
    assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb1).state());
    assertTrue("Inactive load balancer not removed", tester.loadBalancerService().instances().containsKey(lb1));

    // Expirer removes load balancers once expiration time passes
    tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
    expirer.maintain();
    assertFalse("Inactive load balancer removed",
        tester.loadBalancerService().instances().containsKey(lb1));

    // Active load balancer is left alone
    assertSame(LoadBalancer.State.active, loadBalancers.get().get(lb2).state());
    assertTrue("Active load balancer is not removed",
        tester.loadBalancerService().instances().containsKey(lb2));

    // A single cluster is removed
    deployApplication(app2, cluster1);
    expirer.maintain();
    assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb3).state());

    // Expirer defers removal while nodes are still allocated to cluster
    expirer.maintain();
    assertEquals(2, tester.loadBalancerService().instances().size());
    removeNodesOf(app2, cluster2);

    // Expirer removes load balancer for removed cluster
    tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
    expirer.maintain();
    assertFalse("Inactive load balancer removed",
        tester.loadBalancerService().instances().containsKey(lb3));
}
/**
 * Translates an HTTP error response into a BackgroundException by building
 * an S3ServiceException from the status line, the (buffered) response body,
 * and the MinIO-specific error headers when present, then delegating to the
 * exception-based mapper.
 */
public BackgroundException map(HttpResponse response) throws IOException {
    final S3ServiceException failure;
    if (null == response.getEntity()) {
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase());
    } else {
        // Buffer the entity first so its content can be read here without
        // consuming the underlying stream for later readers.
        EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase(),
            EntityUtils.toString(response.getEntity()));
    }
    failure.setResponseCode(response.getStatusLine().getStatusCode());
    if (response.containsHeader(MINIO_ERROR_CODE)) {
        failure.setErrorCode(response.getFirstHeader(MINIO_ERROR_CODE).getValue());
    }
    if (response.containsHeader(MINIO_ERROR_DESCRIPTION)) {
        failure.setErrorMessage(response.getFirstHeader(MINIO_ERROR_DESCRIPTION).getValue());
    }
    return this.map(failure);
}
@Test
public void testLoginFailure() {
    final ServiceException f = new ServiceException("m", "<null/>");
    // HTTP 401 should be mapped to a login failure.
    f.setResponseCode(401);
    f.setErrorMessage("m");
    assertTrue(new S3ExceptionMappingService().map(f) instanceof LoginFailureException);
}
/**
 * Returns whether the compaction scheduler knows a compaction for the given
 * transaction id.
 */
public boolean existCompaction(long txnId) {
    return compactionScheduler.existCompaction(txnId);
}
@Test
public void testExistCompaction() {
    long txnId = 11111;
    CompactionMgr compactionManager = new CompactionMgr();
    CompactionScheduler compactionScheduler =
        new CompactionScheduler(compactionManager, GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(),
            GlobalStateMgr.getCurrentState().getGlobalTransactionMgr(), GlobalStateMgr.getCurrentState(), "");
    compactionManager.setCompactionScheduler(compactionScheduler);
    // Stub the scheduler so it reports one running compaction job with our txn id.
    new MockUp<CompactionScheduler>() {
        @Mock
        public ConcurrentHashMap<PartitionIdentifier, CompactionJob> getRunningCompactions() {
            ConcurrentHashMap<PartitionIdentifier, CompactionJob> r = new ConcurrentHashMap<>();
            PartitionIdentifier partitionIdentifier = new PartitionIdentifier(1, 2, 3);
            Database db = new Database();
            Table table = new LakeTable();
            PhysicalPartition partition = new Partition(123, "aaa", null, null);
            CompactionJob job = new CompactionJob(db, table, partition, txnId, false);
            r.put(partitionIdentifier, job);
            return r;
        }
    };
    Assert.assertEquals(true, compactionManager.existCompaction(txnId));
}
/**
 * Creates a namespace. When no namespaceId is supplied a random UUID is
 * generated; otherwise the id is validated against the allowed pattern,
 * length limit, and uniqueness. The namespace name is validated for illegal
 * characters before delegating to the operation service.
 *
 * @throws NacosApiException on any validation failure (BAD_REQUEST)
 */
@PostMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE, signType = SignType.CONSOLE)
public Result<Boolean> createNamespace(NamespaceForm namespaceForm) throws NacosException {
    namespaceForm.validate();
    String namespaceId = namespaceForm.getNamespaceId();
    String namespaceName = namespaceForm.getNamespaceName();
    String namespaceDesc = namespaceForm.getNamespaceDesc();
    if (StringUtils.isBlank(namespaceId)) {
        namespaceId = UUID.randomUUID().toString();
    } else {
        namespaceId = namespaceId.trim();
        if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) {
            throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
                "namespaceId [" + namespaceId + "] mismatch the pattern");
        }
        if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) {
            throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
                "too long namespaceId, over " + NAMESPACE_ID_MAX_LENGTH);
        }
        // check unique
        if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) {
            throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
                "the namespaceId is existed, namespaceId: " + namespaceForm.getNamespaceId());
        }
    }
    // contains illegal chars
    if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) {
        throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
            "namespaceName [" + namespaceName + "] contains illegal char");
    }
    return Result.success(namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc));
}
@Test
void testCreateNamespace() throws NacosException {
    when(namespaceOperationService.createNamespace(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC)).thenReturn(true);
    Result<Boolean> result = namespaceControllerV2.createNamespace(
        new NamespaceForm(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC));
    // The controller must delegate to the operation service and report success.
    verify(namespaceOperationService).createNamespace(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC);
    assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
    assertTrue(result.getData());
}
/**
 * Logs a failure to persist the rate for {@code key}. Saving errors are
 * deliberately non-fatal: callers fall back to the unsaved, in-memory rate.
 */
@Override
public void handleSaveError(String key, Exception e) {
    // Parameterized logging instead of string concatenation; the final
    // message text is identical and the exception stays the last argument
    // so its stack trace is still logged.
    log.error("Failed saving rate for {}, returning unsaved rate", key, e);
}
@Test
public void testHandleSaveErrorShouldNotThrowException() {
    // The handler must swallow (log) the error rather than propagate it.
    target.handleSaveError("key", new Exception());
}
static String resolveLocalRepoPath(String localRepoPath) { // todo decouple home folder resolution // find homedir String home = System.getenv("ZEPPELIN_HOME"); if (home == null) { home = System.getProperty("zeppelin.home"); } if (home == null) { home = ".."; } return Paths.get(home).resolve(localRepoPath).toAbsolutePath().toString(); }
@Test
void should_return_absolute_path() {
    // A relative repo path must be resolved to an absolute one.
    String resolvedPath = Booter.resolveLocalRepoPath("path");
    assertTrue(Paths.get(resolvedPath).isAbsolute());
}
/**
 * Handles a Tars metadata update: initializes a proxy for the metadata's
 * app if none exists yet for that host, and caches the metadata by path on
 * first sight.
 */
@Override
public void handle(final MetaData metaData) {
    metaData.updateContextPath();
    MetaData metaExist = META_DATA.get(metaData.getPath());
    List<TarsInvokePrx> prxList = ApplicationConfigCache.getInstance()
        .get(metaData.getPath()).getTarsInvokePrxList();
    // Only initialize a proxy if no existing proxy targets this app's host.
    boolean exist = prxList.stream().anyMatch(tarsInvokePrx -> tarsInvokePrx.getHost().equals(metaData.getAppName()));
    if (!exist) {
        ApplicationConfigCache.getInstance().initPrx(metaData);
    }
    if (Objects.isNull(metaExist)) {
        META_DATA.put(metaData.getPath(), metaData);
    }
}
@Test
public void testOnSubscribe() {
    tarsMetaDataHandler.handle(metaData);
    // Second call with the same metadata exercises the cache-hit path.
    tarsMetaDataHandler.handle(metaData);
}
/**
 * Builds the scanner's web service client from scanner properties:
 * server URL, timeouts (new sonar.scanner.* properties falling back to the
 * legacy sonar.ws.timeout), authentication (token property, then the token
 * environment variable, then login/password), SSL configuration, and an
 * optional HTTP proxy with credentials.
 */
@Bean("DefaultScannerWsClient")
public DefaultScannerWsClient provide(ScannerProperties scannerProps, EnvironmentInformation env, GlobalAnalysisMode globalMode,
    System2 system, AnalysisWarnings analysisWarnings, SonarUserHome sonarUserHome) {
    String url = defaultIfBlank(scannerProps.property("sonar.host.url"), "http://localhost:9000");
    HttpConnector.Builder connectorBuilder = HttpConnector.newBuilder().acceptGzip(true);

    // New socket-timeout property takes precedence over the legacy read-timeout one.
    String oldSocketTimeout = defaultIfBlank(scannerProps.property(READ_TIMEOUT_SEC_PROPERTY), valueOf(DEFAULT_READ_TIMEOUT_SEC));
    String socketTimeout = defaultIfBlank(scannerProps.property(SONAR_SCANNER_SOCKET_TIMEOUT), oldSocketTimeout);
    String connectTimeout = defaultIfBlank(scannerProps.property(SONAR_SCANNER_CONNECT_TIMEOUT), valueOf(DEFAULT_CONNECT_TIMEOUT));
    String responseTimeout = defaultIfBlank(scannerProps.property(SONAR_SCANNER_RESPONSE_TIMEOUT), valueOf(DEFAULT_RESPONSE_TIMEOUT));
    // Token: property wins over the environment variable; token doubles as login.
    String envVarToken = defaultIfBlank(system.envVariable(TOKEN_ENV_VARIABLE), null);
    String token = defaultIfBlank(scannerProps.property(TOKEN_PROPERTY), envVarToken);
    String login = defaultIfBlank(scannerProps.property(CoreProperties.LOGIN), token);
    var sslContext = configureSsl(parseSslConfig(scannerProps, sonarUserHome), system);
    connectorBuilder
        .readTimeoutMilliseconds(parseDurationProperty(socketTimeout, SONAR_SCANNER_SOCKET_TIMEOUT))
        .connectTimeoutMilliseconds(parseDurationProperty(connectTimeout, SONAR_SCANNER_CONNECT_TIMEOUT))
        .responseTimeoutMilliseconds(parseDurationProperty(responseTimeout, SONAR_SCANNER_RESPONSE_TIMEOUT))
        .userAgent(env.toString())
        .url(url)
        .credentials(login, scannerProps.property(CoreProperties.PASSWORD))
        .setSSLSocketFactory(sslContext.getSslSocketFactory())
        .setTrustManager(sslContext.getTrustManager().orElseThrow());

    // OkHttp detects 'http.proxyHost' java property already, so just focus on sonar properties
    String proxyHost = defaultIfBlank(scannerProps.property("sonar.scanner.proxyHost"), null);
    if (proxyHost != null) {
        // Default proxy port depends on the target scheme.
        String proxyPortStr = defaultIfBlank(scannerProps.property(SONAR_SCANNER_PROXY_PORT), url.startsWith("https") ? "443" : "80");
        var proxyPort = parseIntProperty(proxyPortStr, SONAR_SCANNER_PROXY_PORT);
        connectorBuilder.proxy(new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyHost, proxyPort)));
    }

    // Proxy credentials: scanner property first, then the JVM-wide http.proxy* properties.
    var scannerProxyUser = scannerProps.property("sonar.scanner.proxyUser");
    String proxyUser = scannerProxyUser != null ? scannerProxyUser : system.properties().getProperty(HTTP_PROXY_USER, "");
    if (isNotBlank(proxyUser)) {
        var scannerProxyPwd = scannerProps.property("sonar.scanner.proxyPassword");
        String proxyPassword = scannerProxyPwd != null ? scannerProxyPwd : system.properties().getProperty(HTTP_PROXY_PASSWORD, "");
        connectorBuilder.proxyCredentials(proxyUser, proxyPassword);
    }

    return new DefaultScannerWsClient(WsClientFactories.getDefault().newClient(connectorBuilder.build()), login != null,
        globalMode, analysisWarnings);
}
@Test
void provide_client_with_custom_settings() {
    scannerProps.put("sonar.host.url", "https://here/sonarqube");
    scannerProps.put("sonar.token", "testToken");
    scannerProps.put("sonar.ws.timeout", "42");
    DefaultScannerWsClient client = underTest.provide(new ScannerProperties(scannerProps), env, GLOBAL_ANALYSIS_MODE, system2,
        ANALYSIS_WARNINGS, sonarUserHome);
    assertThat(client).isNotNull();
    HttpConnector httpConnector = (HttpConnector) client.wsConnector();
    // Base URL gets a trailing slash; no proxy is configured by default.
    assertThat(httpConnector.baseUrl()).isEqualTo("https://here/sonarqube/");
    assertThat(httpConnector.okHttpClient().proxy()).isNull();
}
/**
 * Returns the subscription path, or {@code null} when no subscription
 * provider has been set. The value is resolved from the provider on each
 * call.
 */
public @Nullable SubscriptionPath getSubscription() {
    if (subscription == null) {
        return null;
    }
    return subscription.get();
}
@Test
public void noSubscriptionNoSplitGeneratesSubscription() throws Exception {
    TopicPath topicPath = PubsubClient.topicPathFromName("my_project", "my_topic");
    factory = PubsubTestClient.createFactoryForCreateSubscription();
    PubsubUnboundedSource source =
        new PubsubUnboundedSource(
            factory,
            StaticValueProvider.of(PubsubClient.projectPathFromId("my_project")),
            StaticValueProvider.of(topicPath),
            null /* subscription */,
            null /* timestampLabel */,
            null /* idLabel */,
            false /* needsAttributes */);
    // Without a configured subscription the source reports none (idempotently).
    assertThat(source.getSubscription(), nullValue());
    assertThat(source.getSubscription(), nullValue());
    PipelineOptions options = PipelineOptionsFactory.create();
    PubsubSource actualSource = new PubsubSource(source);
    PubsubReader reader = actualSource.createReader(options, null);
    // Creating a reader lazily creates a subscription, recorded in the checkpoint.
    SubscriptionPath createdSubscription = reader.subscription;
    assertThat(createdSubscription, not(nullValue()));
    PubsubCheckpoint checkpoint = reader.getCheckpointMark();
    assertThat(checkpoint.subscriptionPath, equalTo(createdSubscription.getPath()));
    checkpoint.finalizeCheckpoint();
    // The subscription path must survive checkpoint (de)serialization.
    PubsubCheckpoint deserCheckpoint = CoderUtils.clone(actualSource.getCheckpointMarkCoder(), checkpoint);
    assertThat(checkpoint.subscriptionPath, not(nullValue()));
    assertThat(checkpoint.subscriptionPath, equalTo(deserCheckpoint.subscriptionPath));
    // Readers restored from either checkpoint reuse the created subscription.
    PubsubReader readerFromOriginal = actualSource.createReader(options, checkpoint);
    PubsubReader readerFromDeser = actualSource.createReader(options, deserCheckpoint);
    assertThat(readerFromOriginal.subscription, equalTo(createdSubscription));
    assertThat(readerFromDeser.subscription, equalTo(createdSubscription));
}
/**
 * Adds vendor/product evidence to the dependency for every package-structure
 * key that occurs in {@code value} as a whole word (i.e. not embedded inside
 * a longer alphanumeric token). Each key is analyzed at most once.
 */
protected static void addMatchingValues(List<ClassNameInformation> classes, String value, Dependency dep, EvidenceType type) {
    if (value == null || value.isEmpty() || classes == null || classes.isEmpty()) {
        return;
    }
    final HashSet<String> tested = new HashSet<>();
    for (ClassNameInformation cni : classes) {
        for (String key : cni.getPackageStructure()) {
            if (!tested.contains(key)) {
                tested.add(key);
                final int pos = StringUtils.indexOfIgnoreCase(value, key);
                // Match only when the key is bounded by the string edges or
                // non-alphanumeric characters on both sides.
                if ((pos == 0 && (key.length() == value.length()
                        || (key.length() < value.length() && !Character.isLetterOrDigit(value.charAt(key.length())))))
                        || (pos > 0 && !Character.isLetterOrDigit(value.charAt(pos - 1))
                        && (pos + key.length() == value.length()
                        || (key.length() < value.length() && !Character.isLetterOrDigit(value.charAt(pos + key.length())))))) {
                    dep.addEvidence(type, "jar", "package name", key, Confidence.HIGHEST);
                }
            }
        }
    }
}
@Test
public void testAddMatchingValues() throws Exception {
    File file = BaseTest.getResourceAsFile(this, "struts2-core-2.1.2.jar");
    Dependency dependency = new Dependency(file);
    JarAnalyzer instance = new JarAnalyzer();
    instance.initialize(getSettings());
    instance.prepareFileTypeAnalyzer(null);
    final List<JarAnalyzer.ClassNameInformation> classNames = instance.collectClassNames(dependency);
    // Keys embedded inside longer alphanumeric tokens must NOT match.
    JarAnalyzer.addMatchingValues(classNames, "thevelocity", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "freemarkercore", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "the defaultpropertiesprovidertest", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "thedefaultpropertiesprovider test", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "thedefaultpropertiesprovidertest", dependency, EvidenceType.VENDOR);
    assertFalse(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("velocity"));
    assertFalse(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("freemarker"));
    assertFalse(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("defaultpropertiesprovider"));
    // Whole-word occurrences (bounded by spaces or string edges) MUST match.
    JarAnalyzer.addMatchingValues(classNames, "strutsexception", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "the velocity", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "freemarker core", dependency, EvidenceType.VENDOR);
    JarAnalyzer.addMatchingValues(classNames, "the defaultpropertiesprovider test", dependency, EvidenceType.VENDOR);
    assertTrue(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("strutsexception"));
    assertTrue(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("velocity"));
    assertTrue(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("freemarker"));
    assertTrue(dependency.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("defaultpropertiesprovider"));
}
/**
 * Sends a GET request to the given URL using the supplied charset for the
 * response body, and returns the body as a string.
 */
public static String get(String urlString, Charset customCharset) {
    final HttpRequest request = HttpRequest.get(urlString).charset(customCharset);
    return request.execute().body();
}
@Test
@Disabled
public void getWeixinTest(){
    // Regression check for special URLs: verify a URL containing "&amp;"
    // entities is requested correctly.
    final String url = "https://mp.weixin.qq.com/s?__biz=MzI5NjkyNTIxMg==&amp;mid=100000465&amp;idx=1&amp;sn=1044c0d19723f74f04f4c1da34eefa35&amp;chksm=6cbda3a25bca2ab4516410db6ce6e125badaac2f8c5548ea6e18eab6dc3c5422cb8cbe1095f7";
    final String s = HttpUtil.get(url);
    Console.log(s);
}
/**
 * Returns the cluster endpoints, reloading them from the delegate resolver
 * when the current reload interval has elapsed. On a successful reload the
 * interval resets to the configured base; on failure it doubles (capped at
 * the max) and the previously resolved endpoints are kept.
 */
@Override
public List<T> getClusterEndpoints() {
    long expiryTime = lastUpdateTime + currentReloadIntervalMs;
    if (expiryTime <= System.currentTimeMillis()) {
        try {
            ClusterResolver<T> newDelegate = reload();
            this.lastUpdateTime = System.currentTimeMillis();
            this.currentReloadIntervalMs = reloadIntervalMs;
            // reload() returning null means "no change"; keep the current delegate.
            if (newDelegate != null) {
                delegateRef.set(newDelegate);
                lastReloadTimestamp = System.currentTimeMillis();
                if (logger.isInfoEnabled()) {
                    logger.info("Reload endpoints differ from the original list; next reload in {}[sec], Loaded endpoints={}",
                        currentReloadIntervalMs / 1000, newDelegate.getClusterEndpoints());
                }
            }
        } catch (Exception e) {
            // Exponential back-off on failure, capped at maxReloadIntervalMs.
            this.currentReloadIntervalMs = Math.min(maxReloadIntervalMs, currentReloadIntervalMs * 2);
            logger.warn("Cluster resolve error; keeping the current Eureka endpoints; next reload in "
                + "{}[sec]", currentReloadIntervalMs / 1000, e);
        }
    }
    return delegateRef.get().getClusterEndpoints();
}
@Test(timeout = 30000)
public void testDataAreReloadedPeriodically() throws Exception {
    List<AwsEndpoint> firstEndpointList = SampleCluster.UsEast1a.build();
    factory.setEndpoints(firstEndpointList);

    // First endpoint list is loaded eagerly
    resolver = new ReloadingClusterResolver<>(factory, 1);
    assertThat(resolver.getClusterEndpoints(), is(equalTo(firstEndpointList)));

    // Swap with a different one and wait for the periodic reload to pick it up.
    List<AwsEndpoint> secondEndpointList = SampleCluster.UsEast1b.build();
    factory.setEndpoints(secondEndpointList);
    assertThat(awaitUpdate(resolver, secondEndpointList), is(true));
}
/**
 * Writes one Prometheus gauge sample line of the form
 * {@code name{label="value",...} value\n}, emitting the TYPE header for the
 * metric first if needed. Double quotes inside label values are escaped.
 * Assumes labelsAndValuesArray holds name/value pairs (even length) —
 * TODO confirm callers never pass an odd-length array.
 */
void writeSample(String metricName, Number value, String... labelsAndValuesArray) {
    SimpleTextOutputStream stream = initGaugeType(metricName);
    stream.write(metricName).write('{');
    for (int i = 0; i < labelsAndValuesArray.length; i += 2) {
        String labelValue = labelsAndValuesArray[i + 1];
        // Escape embedded double quotes so the exposition format stays valid.
        if (labelValue != null && labelValue.indexOf('"') > -1) {
            labelValue = labelValue.replace("\"", "\\\"");
        }
        stream.write(labelsAndValuesArray[i]).write("=\"").write(labelValue).write('\"');
        if (i + 2 != labelsAndValuesArray.length) {
            stream.write(',');
        }
    }
    stream.write("} ").write(value).write('\n');
}
@Test
public void canWriteSampleWithLabels() {
    underTest.writeSample("my-other-metric", 123, "cluster", "local");
    underTest.writeSample("my-other-metric", 456, "cluster", "local", "namespace", "my-ns");
    String actual = writeToString();
    // TYPE header is emitted once, followed by one line per sample.
    assertTrue(actual.startsWith("# TYPE my-other-metric gauge"), "Gauge type line missing");
    assertTrue(actual.contains("my-other-metric{cluster=\"local\"} 123"), "Cluster metric line missing");
    assertTrue(actual.contains("my-other-metric{cluster=\"local\",namespace=\"my-ns\"} 456"),
        "Cluster and Namespace metric line missing");
}
/**
 * Starts an assertion chain that compares float elements using exact
 * equality instead of a tolerance.
 */
public FloatArrayAsIterable usingExactEquality() {
    return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
@Test
public void usingExactEquality_containsExactly_primitiveFloatArray_inOrder_success() {
    // Identical arrays must pass an exact-equality, in-order comparison.
    assertThat(array(1.0f, 2.0f, 3.0f))
        .usingExactEquality()
        .containsExactly(array(1.0f, 2.0f, 3.0f))
        .inOrder();
}
/**
 * Generates all Java stubs for the IR: package-info files, type stubs, the
 * message header stub, and, per message, a decoder and an encoder class
 * (each with its own field-precedence model for codec-state checking).
 */
public void generate() throws IOException {
    packageNameByTypes.clear();
    generatePackageInfo();
    generateTypeStubs();
    generateMessageHeaderStub();

    for (final List<Token> tokens : ir.messages()) {
        final Token msgToken = tokens.get(0);
        final List<Token> messageBody = getMessageBody(tokens);
        final boolean hasVarData = -1 != findSignal(messageBody, Signal.BEGIN_VAR_DATA);

        // Partition the message body into fields, then groups, then var-data,
        // advancing the cursor through each section in order.
        int i = 0;
        final List<Token> fields = new ArrayList<>();
        i = collectFields(messageBody, i, fields);
        final List<Token> groups = new ArrayList<>();
        i = collectGroups(messageBody, i, groups);
        final List<Token> varData = new ArrayList<>();
        collectVarData(messageBody, i, varData);

        final String decoderClassName = formatClassName(decoderName(msgToken.name()));
        final String decoderStateClassName = decoderClassName + "#CodecStates";
        final FieldPrecedenceModel decoderPrecedenceModel = precedenceChecks.createDecoderModel(
            decoderStateClassName, tokens);
        generateDecoder(decoderClassName, msgToken, fields, groups, varData, hasVarData, decoderPrecedenceModel);

        final String encoderClassName = formatClassName(encoderName(msgToken.name()));
        final String encoderStateClassName = encoderClassName + "#CodecStates";
        final FieldPrecedenceModel encoderPrecedenceModel = precedenceChecks.createEncoderModel(
            encoderStateClassName, tokens);
        generateEncoder(encoderClassName, msgToken, fields, groups, varData, hasVarData, encoderPrecedenceModel);
    }
}
@Test
void shouldMarkDeprecatedClasses() throws Exception {
    try (InputStream in = Tests.getLocalResource("deprecated-msg-test-schema.xml")) {
        final ParserOptions options = ParserOptions.builder().stopOnError(true).build();
        final MessageSchema schema = parse(in, options);
        final IrGenerator irg = new IrGenerator();
        ir = irg.generate(schema);
        outputManager.clear();
        outputManager.setPackageName(ir.applicableNamespace());
        generator().generate();
        // Both the generated encoder and decoder must carry @Deprecated.
        final String encoderFqcn = ir.applicableNamespace() + ".DeprecatedMessageEncoder";
        final Class<?> encoderClazz = compile(encoderFqcn);
        assertNotNull(encoderClazz);
        assertTrue(encoderClazz.isAnnotationPresent(Deprecated.class));
        final String decoderFqcn = ir.applicableNamespace() + ".DeprecatedMessageDecoder";
        final Class<?> decoderClazz = compile(decoderFqcn);
        assertNotNull(decoderClazz);
        assertTrue(decoderClazz.isAnnotationPresent(Deprecated.class));
    }
}
/**
 * Applies a FeatureLevelRecord to the in-memory feature state.
 * Rejects levels outside this controller's locally supported range. The special
 * metadata.version feature updates {@code metadataVersion}; any other feature is
 * stored in {@code finalizedVersions}, where level 0 means "remove the feature".
 */
public void replay(FeatureLevelRecord record) {
    VersionRange range = quorumFeatures.localSupportedFeature(record.name());
    if (!range.contains(record.featureLevel())) {
        throw new RuntimeException("Tried to apply FeatureLevelRecord " + record + ", but this controller only " +
            "supports versions " + range);
    }
    if (record.name().equals(MetadataVersion.FEATURE_NAME)) {
        // metadata.version is tracked in its own field, not in finalizedVersions.
        MetadataVersion mv = MetadataVersion.fromFeatureLevel(record.featureLevel());
        metadataVersion.set(mv);
        log.info("Replayed a FeatureLevelRecord setting metadata.version to {}", mv);
    } else {
        if (record.featureLevel() == 0) {
            // Level 0 is the tombstone: drop any finalized entry for this feature.
            finalizedVersions.remove(record.name());
            log.info("Replayed a FeatureLevelRecord removing feature {}", record.name());
        } else {
            finalizedVersions.put(record.name(), record.featureLevel());
            log.info("Replayed a FeatureLevelRecord setting feature {} to {}",
                record.name(), record.featureLevel());
        }
    }
}
/** Replays a FeatureLevelRecord and checks the finalized features snapshot reflects it. */
@Test
public void testReplay() {
    LogContext logContext = new LogContext();
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext);
    FeatureLevelRecord record = new FeatureLevelRecord().
        setName("foo").setFeatureLevel((short) 2);
    snapshotRegistry.idempotentCreateSnapshot(-1);
    FeatureControlManager manager = new FeatureControlManager.Builder().
        setLogContext(logContext).
        setQuorumFeatures(features("foo", 1, 2)).
        setSnapshotRegistry(snapshotRegistry).
        setMetadataVersion(MetadataVersion.IBP_3_3_IV0).
        build();
    manager.replay(record);
    // Snapshot at epoch 123 must contain both metadata.version and the replayed "foo" level.
    snapshotRegistry.idempotentCreateSnapshot(123);
    assertEquals(new FinalizedControllerFeatures(versionMap("metadata.version", 4, "foo", 2), 123),
        manager.finalizedFeatures(123));
}
/**
 * Returns the statically-held JAAS configuration entry ({@code userKerberosAce})
 * used for ticket-cache based Kerberos login.
 * NOTE(review): a shared static instance is returned — callers must not mutate it.
 */
public static AppConfigurationEntry ticketCacheEntry() {
    return userKerberosAce;
}
/** Smoke test: the ticket-cache JAAS entry must be initialized (non-null). */
@Test
public void testTicketCacheEntry() {
    AppConfigurationEntry entry = KerberosUtils.ticketCacheEntry();
    assertNotNull(entry);
}
/**
 * Greedily fuses transforms reachable from a gRPC port read into one ExecutableStage.
 * Starting from {@code initialNodes}, output PCollections are either fused into the
 * stage (all consumers join it) or materialized as stage outputs.
 *
 * @param pipeline the queryable pipeline being fused.
 * @param inputPCollection the PCollection read over the gRPC port into this stage.
 * @param initialNodes seed transforms; must be non-empty and share one environment.
 */
public static ExecutableStage forGrpcPortRead(
    QueryablePipeline pipeline,
    PipelineNode.PCollectionNode inputPCollection,
    Set<PipelineNode.PTransformNode> initialNodes) {
  checkArgument(
      !initialNodes.isEmpty(),
      "%s must contain at least one %s.",
      GreedyStageFuser.class.getSimpleName(),
      PipelineNode.PTransformNode.class.getSimpleName());
  // Choose the environment from an arbitrary node. The initial nodes may not be empty for this
  // subgraph to make any sense, there has to be at least one processor node
  // (otherwise the stage is gRPC Read -> gRPC Write, which doesn't do anything).
  Environment environment = getStageEnvironment(pipeline, initialNodes);

  ImmutableSet.Builder<PipelineNode.PTransformNode> fusedTransforms = ImmutableSet.builder();
  fusedTransforms.addAll(initialNodes);

  Set<SideInputReference> sideInputs = new LinkedHashSet<>();
  Set<UserStateReference> userStates = new LinkedHashSet<>();
  Set<TimerReference> timers = new LinkedHashSet<>();
  Set<PipelineNode.PCollectionNode> fusedCollections = new LinkedHashSet<>();
  Set<PipelineNode.PCollectionNode> materializedPCollections = new LinkedHashSet<>();

  // Worklist of PCollections whose fusibility is still undecided.
  Queue<PipelineNode.PCollectionNode> fusionCandidates = new ArrayDeque<>();
  for (PipelineNode.PTransformNode initialConsumer : initialNodes) {
    fusionCandidates.addAll(pipeline.getOutputPCollections(initialConsumer));
    sideInputs.addAll(pipeline.getSideInputs(initialConsumer));
    userStates.addAll(pipeline.getUserStates(initialConsumer));
    timers.addAll(pipeline.getTimers(initialConsumer));
  }
  while (!fusionCandidates.isEmpty()) {
    PipelineNode.PCollectionNode candidate = fusionCandidates.poll();
    if (fusedCollections.contains(candidate) || materializedPCollections.contains(candidate)) {
      // This should generally mean we get to a Flatten via multiple paths through the graph and
      // we've already determined what to do with the output.
      LOG.debug(
          "Skipping fusion candidate {} because it is {} in this {}",
          candidate,
          fusedCollections.contains(candidate) ? "fused" : "materialized",
          ExecutableStage.class.getSimpleName());
      continue;
    }
    PCollectionFusibility fusibility = canFuse(pipeline, candidate, environment, fusedCollections);
    switch (fusibility) {
      case MATERIALIZE:
        materializedPCollections.add(candidate);
        break;
      case FUSE:
        // All of the consumers of the candidate PCollection can be fused into this stage. Do so.
        fusedCollections.add(candidate);
        fusedTransforms.addAll(pipeline.getPerElementConsumers(candidate));
        for (PipelineNode.PTransformNode consumer : pipeline.getPerElementConsumers(candidate)) {
          // The outputs of every transform fused into this stage must be either materialized or
          // themselves fused away, so add them to the set of candidates.
          fusionCandidates.addAll(pipeline.getOutputPCollections(consumer));
          sideInputs.addAll(pipeline.getSideInputs(consumer));
        }
        break;
      default:
        throw new IllegalStateException(
            String.format(
                "Unknown type of %s %s", PCollectionFusibility.class.getSimpleName(), fusibility));
    }
  }

  return ImmutableExecutableStage.ofFullComponents(
      pipeline.getComponents(),
      environment,
      inputPCollection,
      sideInputs,
      userStates,
      timers,
      fusedTransforms.build(),
      materializedPCollections,
      ExecutableStage.DEFAULT_WIRE_CODER_SETTINGS);
}
/**
 * A stage that produces a PCollection consumed as a side input elsewhere must
 * materialize that PCollection as a stage output.
 */
@Test
public void executableStageProducingSideInputMaterializesIt() {
  // impulse -- ParDo(createSide)
  //         \_ ParDo(processMain) with side input from createSide
  // The ExecutableStage executing createSide must have an output.
  Environment env = Environments.createDockerEnvironment("common");
  PTransform impulse =
      PTransform.newBuilder()
          .setUniqueName("impulse")
          .putOutputs("output", "impulsePC")
          .setSpec(FunctionSpec.newBuilder().setUrn(PTransformTranslation.IMPULSE_TRANSFORM_URN))
          .build();
  PTransform createSide =
      PTransform.newBuilder()
          .setUniqueName("createSide")
          .putInputs("input", "impulsePC")
          .putOutputs("output", "sidePC")
          .setSpec(
              FunctionSpec.newBuilder()
                  .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                  .setPayload(
                      ParDoPayload.newBuilder()
                          .setDoFn(FunctionSpec.newBuilder())
                          .build()
                          .toByteString()))
          .setEnvironmentId("common")
          .build();
  // processMain declares "side" as a side input, which forces sidePC to be materialized.
  PTransform processMain =
      PTransform.newBuilder()
          .setUniqueName("processMain")
          .putInputs("main", "impulsePC")
          .putInputs("side", "sidePC")
          .setSpec(
              FunctionSpec.newBuilder()
                  .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                  .setPayload(
                      ParDoPayload.newBuilder()
                          .setDoFn(FunctionSpec.newBuilder())
                          .putSideInputs("side", SideInput.getDefaultInstance())
                          .build()
                          .toByteString()))
          .setEnvironmentId("common")
          .build();
  PCollection sidePC = PCollection.newBuilder().setUniqueName("sidePC").build();
  PCollection impulsePC = PCollection.newBuilder().setUniqueName("impulsePC").build();
  QueryablePipeline p =
      QueryablePipeline.forPrimitivesIn(
          partialComponents
              .toBuilder()
              .putTransforms("impulse", impulse)
              .putTransforms("createSide", createSide)
              .putTransforms("processMain", processMain)
              .putPcollections("impulsePC", impulsePC)
              .putPcollections("sidePC", sidePC)
              .putEnvironments("common", env)
              .build());
  PCollectionNode impulseOutput =
      getOnlyElement(p.getOutputPCollections(PipelineNode.pTransform("impulse", impulse)));
  ExecutableStage subgraph =
      GreedyStageFuser.forGrpcPortRead(
          p, impulseOutput, ImmutableSet.of(PipelineNode.pTransform("createSide", createSide)));
  assertThat(
      subgraph.getOutputPCollections(), contains(PipelineNode.pCollection("sidePC", sidePC)));
}
/**
 * Computes the out-degree (number of outgoing edges) of every vertex.
 *
 * @return a map from each vertex in {@code neighbors} to the size of its
 *     adjacency collection.
 */
public Map<V, Integer> outDegree() {
    Map<V, Integer> result = new HashMap<>();
    // Iterate entries directly instead of keySet() + get(): one lookup per vertex
    // instead of two, and works identically for every map implementation.
    neighbors.forEach((vertex, adjacent) -> result.put(vertex, adjacent.size()));
    return result;
}
/** Checks the out-degree of every vertex in the fixture graph, including isolated sinks. */
@Test
void outDegree() {
    Map<Character, Integer> result = graph.outDegree();
    Map<Character, Integer> expected = new HashMap<>(7);
    expected.put('A', 1);
    expected.put('B', 2);
    expected.put('C', 0);
    expected.put('D', 1);
    expected.put('E', 0);
    expected.put('F', 1);
    expected.put('G', 0);
    assertEquals(expected, result);
}
/**
 * Polls for the raw bytes of the next dead-letter-queue entry, waiting up to the
 * default timeout of 100 ms before giving up. Delegates to {@code pollEntryBytes(long)}.
 */
byte[] pollEntryBytes() throws IOException, InterruptedException {
    return pollEntryBytes(100);
}
/** Reads back two entries from a segment whose first block contains internal fragmentation. */
@Test
public void testReaderWithBlockInternalFragmentation() throws IOException, InterruptedException {
    writeSegmentWithFirstBlockContainingInternalFragmentation();
    try (DeadLetterQueueReader reader = new DeadLetterQueueReader(dir)) {
        // First entry spans the fragmented block.
        byte[] rawStr = reader.pollEntryBytes();
        assertNotNull(rawStr);
        assertEquals(stringOf(INTERNAL_FRAG_PAYLOAD_SIZE, 'A'), new String(rawStr, StandardCharsets.UTF_8));

        // Second entry must still be readable after the fragmentation.
        rawStr = reader.pollEntryBytes();
        assertNotNull(rawStr);
        assertEquals("BBBBBBBBBB", new String(rawStr, StandardCharsets.UTF_8));
    }
}
/**
 * Builds and initializes a HostRestrictingAuthorizationFilter from the restriction
 * string in the given configuration (empty restrictions when unset).
 *
 * @throws IllegalStateException if the filter's init() fails.
 */
public static HostRestrictingAuthorizationFilter initializeState(Configuration conf) {
    String confName = HostRestrictingAuthorizationFilter.HDFS_CONFIG_PREFIX +
        HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG;
    String confValue = conf.get(confName);
    // simply pass a blank value if we do not have one set
    confValue = (confValue == null ? "" : confValue);
    Map<String, String> confMap =
        ImmutableMap.of(HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG, confValue);
    FilterConfig fc = new DatanodeHttpServer.MapBasedFilterConfig(
        HostRestrictingAuthorizationFilter.class.getName(), confMap);
    HostRestrictingAuthorizationFilter hostRestrictingAuthorizationFilter =
        new HostRestrictingAuthorizationFilter();
    try {
        hostRestrictingAuthorizationFilter.init(fc);
    } catch (ServletException e) {
        // Wrap the checked ServletException; callers treat a bad filter config as fatal.
        throw new IllegalStateException(
            "Failed to initialize HostRestrictingAuthorizationFilter.", e);
    }
    return hostRestrictingAuthorizationFilter;
}
/** One filter instance shared by several channels; closing one channel must not break the rest. */
@Test
public void testMultipleChannels() {
    Configuration conf = new Configuration();
    conf.set(CONFNAME, "*,*,/allowed");
    HostRestrictingAuthorizationFilter filter =
        HostRestrictingAuthorizationFilterHandler.initializeState(conf);
    EmbeddedChannel channel1 = new CustomEmbeddedChannel("127.0.0.1", 1006,
        new HostRestrictingAuthorizationFilterHandler(filter));
    EmbeddedChannel channel2 = new CustomEmbeddedChannel("127.0.0.2", 1006,
        new HostRestrictingAuthorizationFilterHandler(filter));
    EmbeddedChannel channel3 = new CustomEmbeddedChannel("127.0.0.3", 1006,
        new HostRestrictingAuthorizationFilterHandler(filter));
    FullHttpRequest allowedHttpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
        HttpMethod.GET, WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_one?op=OPEN");
    FullHttpRequest allowedHttpRequest2 = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
        HttpMethod.GET, WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_two?op=OPEN");
    FullHttpRequest allowedHttpRequest3 = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
        HttpMethod.GET, WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN");
    assertTrue("Should successfully accept request", channel1.writeInbound(allowedHttpRequest));
    assertTrue("Should successfully accept request, second time",
        channel2.writeInbound(allowedHttpRequest2));
    // verify closing one channel does not affect remaining channels
    channel1.close();
    assertTrue("Should successfully accept request, third time",
        channel3.writeInbound(allowedHttpRequest3));
}
/**
 * Forwards the statement to the leader, waits for this node's journal to catch up
 * to the leader's max journal id, then applies the forwarded result (MySQL state,
 * resource group, audit statistics) to the local connect context.
 */
public void execute() throws Exception {
    forward();
    LOG.info("forwarding to master get result max journal id: {}", result.maxJournalId);
    // Block until local replay reaches the leader's journal position so the
    // follower observes the effects of the forwarded statement.
    ctx.getGlobalStateMgr().getJournalObservable().waitOn(result.maxJournalId, waitTimeoutMs);
    if (result.state != null) {
        MysqlStateType state = MysqlStateType.fromString(result.state);
        if (state != null) {
            ctx.getState().setStateType(state);
            if (result.isSetErrorMsg()) {
                ctx.getState().setMsg(result.getErrorMsg());
            }
            // Only a terminal success state triggers the post-forward hook.
            if (state == MysqlStateType.EOF || state == MysqlStateType.OK) {
                afterForward();
            }
        }
    }
    if (result.isSetResource_group_name()) {
        ctx.getAuditEventBuilder().setResourceGroup(result.getResource_group_name());
    }
    if (result.isSetAudit_statistics()) {
        TAuditStatistics tAuditStatistics = result.getAudit_statistics();
        if (ctx.getExecutor() != null) {
            ctx.getExecutor().setQueryStatistics(AuditStatisticsUtil.toProtobuf(tAuditStatistics));
        }
    }
}
/** Forwarding at the MAX_FORWARD_TIMES limit must fail with ERR_FORWARD_TOO_MANY_TIMES. */
@Test
public void testForwardTooManyTimes() {
    ConnectContext connectContext = new ConnectContext();
    connectContext.setForwardTimes(LeaderOpExecutor.MAX_FORWARD_TIMES);
    try {
        new LeaderOpExecutor(new OriginStatement("show frontends"), connectContext,
                RedirectStatus.FORWARD_NO_SYNC)
                .execute();
    } catch (Exception e) {
        Assert.assertTrue(e instanceof ErrorReportException);
        Assert.assertEquals(ErrorCode.ERR_FORWARD_TOO_MANY_TIMES, ((ErrorReportException) e).getErrorCode());
        return;
    }
    Assert.fail("should throw ERR_FORWARD_TOO_MANY_TIMES exception");
}
/**
 * Verifies a block: first its header, then its transactions.
 *
 * @param params network parameters the block is validated against.
 * @param block the block to verify.
 * @param height the block's height in the chain.
 * @param flags verification flags controlling which transaction checks run.
 * @throws VerificationException if either the header or the transactions fail verification.
 */
public static void verify(NetworkParameters params, Block block, int height, EnumSet<VerifyFlag> flags)
        throws VerificationException {
    verifyHeader(block);
    verifyTransactions(params, block, height, flags);
}
/** A block whose coinbase is not the first transaction must fail verification. */
@Test
public void testBadTransactions() {
    // Re-arrange so the coinbase transaction is not first.
    Transaction tx1 = block700000.transactions.get(0);
    Transaction tx2 = block700000.transactions.get(1);
    block700000.transactions.set(0, tx2);
    block700000.transactions.set(1, tx1);
    try {
        Block.verify(TESTNET, block700000, Block.BLOCK_HEIGHT_GENESIS,
                EnumSet.noneOf(Block.VerifyFlag.class));
        fail();
    } catch (VerificationException e) {
        // We should get here.
    }
}
/** Returns the directory offset field ({@code dir_offset}) from the CHM ITSF header. */
public long getDirOffset() {
    return dir_offset;
}
/** The parsed header's directory offset must match the known fixture value. */
@Test
public void getDirOffset() {
    assertEquals(TestParameters.VP_DIRECTORY_OFFSET, chmItsfHeader.getDirOffset());
}
/**
 * Converts Fair Scheduler queue weights to Capacity Scheduler non-labeled weights
 * for a parent queue and its children, and enables auto queue creation v2 on the parent.
 * Leaf queues without children are skipped entirely.
 */
@Override
public void convertWeightsForChildQueues(FSQueue queue, CapacitySchedulerConfiguration csConfig) {
    List<FSQueue> children = queue.getChildQueues();

    if (queue instanceof FSParentQueue || !children.isEmpty()) {
        QueuePath queuePath = new QueuePath(queue.getName());
        // The root queue's own weight is set explicitly; other parents only get
        // their children's weights set below.
        if (queue.getName().equals(ROOT_QUEUE)) {
            csConfig.setNonLabeledQueueWeight(queuePath, queue.getWeight());
        }
        children.forEach(fsQueue -> csConfig.setNonLabeledQueueWeight(
            new QueuePath(fsQueue.getName()), fsQueue.getWeight()));
        csConfig.setAutoQueueCreationV2Enabled(queuePath, true);
    }
}
/** Converts a root with three weighted children and checks every generated weight property. */
@Test
public void testMultiWeightConversion() {
    FSQueue root = createFSQueues(1, 2, 3);

    converter.convertWeightsForChildQueues(root, csConfig);

    assertEquals("Number of properties", 24, csConfig.getPropsWithPrefix(PREFIX).size());
    // root itself always receives weight 1.0f.
    assertEquals("root weight", 1.0f, csConfig.getNonLabeledQueueWeight(ROOT), 0.0f);
    assertEquals("root.a weight", 1.0f, csConfig.getNonLabeledQueueWeight(ROOT_A), 0.0f);
    assertEquals("root.b weight", 2.0f, csConfig.getNonLabeledQueueWeight(ROOT_B), 0.0f);
    assertEquals("root.c weight", 3.0f, csConfig.getNonLabeledQueueWeight(ROOT_C), 0.0f);
}
/**
 * Routes the logic table to its sharded data nodes, producing one route unit per
 * data node (data source mapped to itself, logic table mapped to the actual table).
 *
 * @param shardingRule sharding rule supplying the table configuration.
 * @return route context carrying the original data nodes and the computed route units.
 */
@Override
public RouteContext route(final ShardingRule shardingRule) {
    final RouteContext result = new RouteContext();
    final Collection<DataNode> dataNodes = getDataNodes(shardingRule, shardingRule.getShardingTable(logicTableName));
    result.getOriginalDataNodes().addAll(originalDataNodes);
    for (final DataNode node : dataNodes) {
        final RouteMapper dataSourceMapper = new RouteMapper(node.getDataSourceName(), node.getDataSourceName());
        final RouteMapper tableMapper = new RouteMapper(logicTableName, node.getTableName());
        result.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.singleton(tableMapper)));
    }
    return result;
}
/** Routing t_order by sharding conditions must yield exactly ds_1 / t_order_1. */
@Test
void assertRouteByShardingConditions() {
    ShardingStandardRoutingEngine standardRoutingEngine = createShardingStandardRoutingEngine("t_order",
            ShardingRoutingEngineFixtureBuilder.createShardingConditions("t_order"), mock(SQLStatementContext.class), new HintValueContext());
    RouteContext routeContext = standardRoutingEngine.route(ShardingRoutingEngineFixtureBuilder.createBasedShardingRule());
    List<RouteUnit> routeUnits = new ArrayList<>(routeContext.getRouteUnits());
    assertThat(routeContext.getRouteUnits().size(), is(1));
    assertThat(routeUnits.get(0).getDataSourceMapper().getActualName(), is("ds_1"));
    assertThat(routeUnits.get(0).getTableMappers().size(), is(1));
    assertThat(routeUnits.get(0).getTableMappers().iterator().next().getActualName(), is("t_order_1"));
    assertThat(routeUnits.get(0).getTableMappers().iterator().next().getLogicName(), is("t_order"));
}
/**
 * Builds a throttled-time counter for the given RPC method that increments two
 * counters at once: a fine-grained per-method counter and the coarse overall
 * throttling counter consumed by the runner for autoscaling.
 */
public static Counter throttledTimeCounter(RpcMethod method) {
    LabeledMetricNameUtils.MetricNameBuilder nameBuilder =
        LabeledMetricNameUtils.MetricNameBuilder.baseNameBuilder(THROTTLED_TIME);
    nameBuilder.addLabel(RPC_METHOD, method.toString());
    MetricName metricName = nameBuilder.build(METRICS_NAMESPACE);
    // for specific method
    Counter fineCounter = new DelegatingCounter(metricName, false, true);
    // for overall throttling time, used by runner for scaling decision
    Counter coarseCounter = BigQueryServicesImpl.StorageClientImpl.THROTTLING_MSECS;
    // NestedCounter fans every inc() out to both sub-counters.
    return new NestedCounter(
        MetricName.named(
            METRICS_NAMESPACE, metricName.getName() + coarseCounter.getName().getName()),
        fineCounter,
        coarseCounter);
}
/** Incrementing the nested throttle counter must update both the fine and coarse sub-counters. */
@Test
public void testThrottledTimeCounter() throws Exception {

    // Setup
    TestMetricsContainer testContainer = new TestMetricsContainer();
    MetricsEnvironment.setCurrentContainer(testContainer);

    // Test throttleCounter metric.
    Counter appendRowsThrottleCounter =
        BigQuerySinkMetrics.throttledTimeCounter(BigQuerySinkMetrics.RpcMethod.APPEND_ROWS);
    appendRowsThrottleCounter.inc(1);

    assertThat(
        appendRowsThrottleCounter.getName().getName(),
        equalTo("ThrottledTime*rpc_method:APPEND_ROWS;throttling-msecs"));

    // check that both sub-counters have been incremented
    MetricName counterName =
        MetricName.named("BigQuerySink", "ThrottledTime*rpc_method:APPEND_ROWS;");
    testContainer.assertPerWorkerCounterValue(counterName, 1L);
    counterName =
        MetricName.named(
            BigQueryServicesImpl.StorageClientImpl.class, Metrics.THROTTLE_TIME_COUNTER_NAME);
    assertEquals(1L, (long) testContainer.getCounter(counterName).getCumulative());
}
/**
 * Joins path parts onto a base path with "/" separators.
 *
 * <p>Uses a literal "/" rather than {@code File.separator}: the results are URI
 * paths (e.g. "http://foo/bar/table"), which must use forward slashes on every
 * platform. The previous {@code File.separator} would produce backslashes on
 * Windows, breaking the produced URIs.
 *
 * @param basePath the leading path segment (may itself contain "/").
 * @param parts additional segments appended in order.
 * @return the joined path.
 */
public static String getPath(String basePath, String... parts) {
    StringJoiner stringJoiner = new StringJoiner("/");
    stringJoiner.add(basePath);
    for (String part : parts) {
        stringJoiner.add(part);
    }
    return stringJoiner.toString();
}
/** Joining URI path parts must always use forward slashes and preserve encoded characters. */
@Test
public void testGetPath() {
    assertEquals(URIUtils.getPath("http://foo/bar"), "http://foo/bar");
    assertEquals(URIUtils.getPath("http://foo/bar", "table"), "http://foo/bar/table");
    assertEquals(URIUtils.getPath("http://foo/bar", "table", "segment+%25"), "http://foo/bar/table/segment+%25");
    assertEquals(URIUtils.getPath("/foo/bar", "table", "segment+%25"), "/foo/bar/table/segment+%25");
    assertEquals(URIUtils.getPath("file:/foo/bar", "table", "segment+%25"), "file:/foo/bar/table/segment+%25");
}
/** Returns the configuration item's value after the change. */
public String getNewValue() {
    return newValue;
}
/** Round-trips the newValue property through setter and getter. */
@Test
void getNewValue() {
    ConfigurationChangeEvent event = new ConfigurationChangeEvent();
    event.setNewValue("newValue");
    Assertions.assertEquals("newValue", event.getNewValue());
}
/**
 * Finds a method on {@code clazz} by name only, ignoring parameter types
 * (delegates with a {@code null} parameter-type signature).
 *
 * @deprecated ambiguous for overloaded methods; use findMethodByMethodSignature
 *     with explicit parameter types instead.
 */
@Deprecated
public static Method findMethodByMethodName(Class<?> clazz, String methodName)
        throws NoSuchMethodException, ClassNotFoundException {
    return findMethodByMethodSignature(clazz, methodName, null);
}
/** Looking up an existing method by name alone must succeed. */
@Test
void testFindMethodByMethodName1() throws Exception {
    assertNotNull(ReflectUtils.findMethodByMethodName(Foo.class, "hello"));
}
/**
 * Returns all configured logic table names.
 * NOTE(review): this exposes the live keySet view of the internal map — callers
 * must treat the returned collection as read-only.
 */
public Collection<String> getAllTableNames() {
    return tables.keySet();
}
/** An encrypt rule configured with one table must report exactly that table name. */
@Test
void assertGetAllTableNames() {
    assertThat(new EncryptRule("foo_db", createEncryptRuleConfiguration()).getAllTableNames(), is(Collections.singleton("t_encrypt")));
}
/**
 * Adds the given action, replacing any existing action via
 * {@code addOrReplaceAction} (see the test: a prior CauseAction is replaced,
 * while an unrelated ParametersAction is kept).
 */
public void replaceAction(@NonNull Action a) {
    addOrReplaceAction(a);
}
/** Replacing a CauseAction removes the old one and appends the new, leaving others intact. */
@SuppressWarnings("deprecation")
@Test
public void replaceAction() {
    CauseAction a1 = new CauseAction();
    ParametersAction a2 = new ParametersAction();
    thing.addAction(a1);
    thing.addAction(a2);
    CauseAction a3 = new CauseAction();
    thing.replaceAction(a3);
    // a1 is gone; a2 keeps its position; a3 is appended.
    assertEquals(Arrays.asList(a2, a3), thing.getActions());
}
/**
 * Decodes one replication message into a WAL event and stamps it with its log
 * sequence number. Transaction boundaries are decoded or ignored depending on
 * the {@code decodeWithTX} flag.
 */
@Override
public AbstractWALEvent decode(final ByteBuffer data, final BaseLogSequenceNumber logSequenceNumber) {
    // Drain the remaining buffer bytes and interpret them as UTF-8 text.
    final byte[] payload = new byte[data.remaining()];
    data.get(payload);
    final String text = new String(payload, StandardCharsets.UTF_8);
    final AbstractWALEvent event = decodeWithTX ? decodeDataWithTX(text) : decodeDataIgnoreTX(text);
    event.setLogSequenceNumber(logSequenceNumber);
    return event;
}
/** Decoding an INSERT with a raw-typed column must yield a PGobject holding the raw value. */
@Test
void assertDecodeWriteRowEventWithRaw() {
    MppTableData tableData = new MppTableData();
    tableData.setTableName("public.test");
    tableData.setOpType("INSERT");
    tableData.setColumnsName(new String[]{"data"});
    tableData.setColumnsType(new String[]{"raw"});
    tableData.setColumnsVal(new String[]{"'7D'"});
    ByteBuffer data = ByteBuffer.wrap(JsonUtils.toJsonString(tableData).getBytes());
    WriteRowEvent actual = (WriteRowEvent) new MppdbDecodingPlugin(null, false, false).decode(data, logSequenceNumber);
    assertThat(actual.getLogSequenceNumber(), is(logSequenceNumber));
    assertThat(actual.getTableName(), is("test"));
    Object byteaObj = actual.getAfterRow().get(0);
    assertThat(byteaObj, instanceOf(PGobject.class));
    assertThat(byteaObj.toString(), is("7D"));
}
/**
 * Overload that auto-generates the processor name before delegating to the
 * {@code Named}-accepting variant of processValues.
 */
@Override
public <VOut> KStream<K, VOut> processValues(
    final FixedKeyProcessorSupplier<? super K, ? super V, VOut> processorSupplier,
    final String... stateStoreNames
) {
    return processValues(
        processorSupplier,
        Named.as(builder.newProcessorName(PROCESSVALUES_NAME)),
        stateStoreNames
    );
}
/** Passing a null store-name array must fail fast with an NPE and a clear message. */
@Test
public void shouldNotAllowNullStoreNamesOnProcessValues() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.processValues(fixedKeyProcessorSupplier, (String[]) null));
    assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
}
/**
 * Lists files (optionally recursively) under a GCS URI, returning metadata for
 * each entry. The prefix blob itself is excluded; directories are detected by a
 * trailing '/' and get no last-modified time (it would NPE on directory blobs).
 *
 * @param fileUri GCS URI to list.
 * @param recursive whether to descend into sub-"directories".
 * @return immutable list of file metadata entries.
 * @throws IOException if visiting the blobs fails.
 */
@Override
public List<FileMetadata> listFilesWithMetadata(URI fileUri, boolean recursive)
    throws IOException {
  ImmutableList.Builder<FileMetadata> listBuilder = ImmutableList.builder();
  GcsUri gcsFileUri = new GcsUri(fileUri);
  String prefix = gcsFileUri.getPrefix();
  String bucketName = gcsFileUri.getBucketName();
  visitFiles(gcsFileUri, recursive, blob -> {
    if (!blob.getName().equals(prefix)) {
      // Note: isDirectory flag is only set when listing with BlobListOption.currentDirectory() i.e non-recursively.
      // For simplicity, we check if a path is directory by checking if it ends with '/', as done in S3PinotFS.
      boolean isDirectory = blob.getName().endsWith(GcsUri.DELIMITER);
      FileMetadata.Builder fileBuilder =
          new FileMetadata.Builder().setFilePath(GcsUri.createGcsUri(bucketName, blob.getName()).toString())
              .setLength(blob.getSize()).setIsDirectory(isDirectory);
      if (!isDirectory) {
        // Note: if it's a directory, updateTime is set to null, and calling this getter leads to NPE.
        // public Long getUpdateTime() { return updateTime; }. So skip this for directory.
        fileBuilder.setLastModifiedTime(blob.getUpdateTime());
      }
      listBuilder.add(fileBuilder.build());
    }
  });
  ImmutableList<FileMetadata> listedFiles = listBuilder.build();
  LOGGER.info("Listed {} files from URI: {}, is recursive: {}", listedFiles.size(), gcsFileUri, recursive);
  return listedFiles;
}
/**
 * Builds a directory tree (subfolders with files, an empty folder, a root file)
 * and verifies listFiles/listFilesWithMetadata counts and contents, both
 * recursively and non-recursively.
 */
@Test
public void testListFilesWithMetadata()
    throws Exception {
  skipIfNotConfigured();

  // Create empty file
  Path emptyFile = _localTmpDir.resolve("empty");
  emptyFile.toFile().createNewFile();

  // Create 5 subfolders with files inside.
  int count = 5;
  Set<String> expectedNonRecursive = new HashSet<>();
  Set<String> expectedRecursive = new HashSet<>();
  for (int i = 0; i < count; i++) {
    GcsUri testDir = _dataDir.resolve("testDir" + i);
    _pinotFS.mkdir(testDir.getUri());
    expectedNonRecursive.add(appendSlash(testDir).toString());

    GcsUri testFile = testDir.resolve("testFile" + i);
    // Create the file by copying an empty file there.
    _pinotFS.copyFromLocalFile(emptyFile.toFile(), testFile.getUri());
    expectedRecursive.add(appendSlash(testDir).toString());
    expectedRecursive.add(testFile.toString());
  }
  // An empty directory appears in both listings.
  GcsUri testDirEmpty = _dataDir.resolve("testDirEmpty");
  _pinotFS.mkdir(testDirEmpty.getUri());
  expectedNonRecursive.add(appendSlash(testDirEmpty).toString());
  expectedRecursive.add(appendSlash(testDirEmpty).toString());

  // A plain file at the root appears in both listings.
  GcsUri testRootFile = _dataDir.resolve("testRootFile");
  _pinotFS.copyFromLocalFile(emptyFile.toFile(), testRootFile.getUri());
  expectedNonRecursive.add(testRootFile.toString());
  expectedRecursive.add(testRootFile.toString());

  // Assert that recursive list files and nonrecursive list files are as expected
  String[] files = _pinotFS.listFiles(_dataDir.getUri(), false);
  Assert.assertEquals(files.length, count + 2);
  Assert.assertTrue(expectedNonRecursive.containsAll(Arrays.asList(files)), Arrays.toString(files));
  files = _pinotFS.listFiles(_dataDir.getUri(), true);
  Assert.assertEquals(files.length, count * 2 + 2);
  Assert.assertTrue(expectedRecursive.containsAll(Arrays.asList(files)), Arrays.toString(files));

  // Assert that recursive list files and nonrecursive list files with file info are as expected
  List<FileMetadata> fileMetadata = _pinotFS.listFilesWithMetadata(_dataDir.getUri(), false);
  Assert.assertEquals(fileMetadata.size(), count + 2);
  Assert.assertEquals(fileMetadata.stream().filter(FileMetadata::isDirectory).count(), count + 1);
  Assert.assertEquals(fileMetadata.stream().filter(f -> !f.isDirectory()).count(), 1);
  Assert.assertTrue(expectedNonRecursive.containsAll(
          fileMetadata.stream().map(FileMetadata::getFilePath).collect(Collectors.toSet())),
      fileMetadata.toString());
  fileMetadata = _pinotFS.listFilesWithMetadata(_dataDir.getUri(), true);
  Assert.assertEquals(fileMetadata.size(), count * 2 + 2);
  Assert.assertEquals(fileMetadata.stream().filter(FileMetadata::isDirectory).count(), count + 1);
  Assert.assertEquals(fileMetadata.stream().filter(f -> !f.isDirectory()).count(), count + 1);
  Assert.assertTrue(
      expectedRecursive.containsAll(fileMetadata.stream().map(FileMetadata::getFilePath).collect(Collectors.toSet())),
      fileMetadata.toString());
}
/**
 * Creates a UriEndpoint for the given url using the configured
 * {@code connectAddress}; delegates to the three-argument overload.
 */
UriEndpoint createUriEndpoint(String url, boolean isWs) {
    return createUriEndpoint(url, isWs, connectAddress);
}
/** With SSL enabled, a relative path must resolve to https:// (HTTP) and wss:// (WebSocket). */
@Test
void createUriEndpointRelativeAddressSsl() {
    String test1 = this.builder.host("example.com")
                               .port(8080)
                               .sslSupport()
                               .build()
                               .createUriEndpoint("/foo", false)
                               .toExternalForm();
    String test2 = this.builder.host("example.com")
                               .port(8080)
                               .sslSupport()
                               .build()
                               .createUriEndpoint("/foo", true)
                               .toExternalForm();

    assertThat(test1).isEqualTo("https://example.com:8080/foo");
    assertThat(test2).isEqualTo("wss://example.com:8080/foo");
}
/**
 * {@inheritDoc}
 *
 * @return 0, which by JDBC DatabaseMetaData convention means the limit is
 *     unknown or there is no limit.
 */
@Override
public int getMaxTablesInSelect() {
    return 0;
}
/** The metadata adapter must report 0 (no limit) for max tables in SELECT. */
@Test
void assertGetMaxTablesInSelect() {
    assertThat(metaData.getMaxTablesInSelect(), is(0));
}
/**
 * Returns a View.AsMap transform for turning a keyed PCollection into a
 * map-valued side input.
 * NOTE(review): the {@code false} constructor flag's meaning is not visible
 * here — presumably it distinguishes this from a multimap variant; confirm
 * against the AsMap constructor.
 */
public static <K, V> AsMap<K, V> asMap() {
    return new AsMap<>(false);
}
/** A Combine.perKey result viewed as a map side input must expose the combined values. */
@Test
@Category(ValidatesRunner.class)
public void testCombinedMapSideInput() {

    final PCollectionView<Map<String, Integer>> view =
        pipeline
            .apply("CreateSideInput", Create.of(KV.of("a", 1), KV.of("a", 20), KV.of("b", 3)))
            .apply("SumIntegers", Combine.perKey(Sum.ofIntegers()))
            .apply(View.asMap());

    PCollection<KV<String, Integer>> output =
        pipeline
            .apply("CreateMainInput", Create.of("apple", "banana", "blackberry"))
            .apply(
                "Output",
                ParDo.of(
                        new DoFn<String, KV<String, Integer>>() {
                          @ProcessElement
                          public void processElement(ProcessContext c) {
                            // Look up by the word's first letter in the map side input.
                            c.output(
                                KV.of(
                                    c.element(), c.sideInput(view).get(c.element().substring(0, 1))));
                          }
                        })
                    .withSideInputs(view));

    PAssert.that(output)
        .containsInAnyOrder(KV.of("apple", 21), KV.of("banana", 3), KV.of("blackberry", 3));

    pipeline.run();
}
/**
 * REST endpoint: creates or updates a Customer under the caller's tenant.
 * The tenant id is always taken from the authenticated user, then entity-level
 * permission checks run before delegating to the customer service.
 */
@ApiOperation(value = "Create or update Customer (saveCustomer)",
        notes = "Creates or Updates the Customer. When creating customer, platform generates Customer Id as " + UUID_WIKI_LINK +
                "The newly created Customer Id will be present in the response. " +
                "Specify existing Customer Id to update the Customer. " +
                "Referencing non-existing Customer Id will cause 'Not Found' error." +
                "Remove 'id', 'tenantId' from the request body example (below) to create new Customer entity. " +
                TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/customer", method = RequestMethod.POST)
@ResponseBody
public Customer saveCustomer(@io.swagger.v3.oas.annotations.parameters.RequestBody(description = "A JSON value representing the customer.")
                             @RequestBody Customer customer) throws Exception {
    // Force the tenant from the session — clients cannot save into another tenant.
    customer.setTenantId(getTenantId());
    checkEntity(customer.getId(), customer, Resource.CUSTOMER);
    return tbCustomerService.save(customer, getCurrentUser());
}
/** Create, update, fetch and delete a customer, verifying ADDED/UPDATED notifications. */
@Test
public void testSaveCustomer() throws Exception {
    Customer customer = new Customer();
    customer.setTitle("My customer");

    Mockito.reset(tbClusterService, auditLogService);

    Customer savedCustomer = doPost("/api/customer", customer, Customer.class);

    testNotifyEntityAllOneTime(savedCustomer, savedCustomer.getId(), savedCustomer.getId(),
            savedCustomer.getTenantId(), new CustomerId(CustomerId.NULL_UUID),
            tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.ADDED);

    Assert.assertNotNull(savedCustomer);
    Assert.assertNotNull(savedCustomer.getId());
    Assert.assertTrue(savedCustomer.getCreatedTime() > 0);
    Assert.assertEquals(customer.getTitle(), savedCustomer.getTitle());

    // Update path: same endpoint with an existing id must raise an UPDATED notification.
    savedCustomer.setTitle("My new customer");
    doPost("/api/customer", savedCustomer, Customer.class);

    testNotifyEntityAllOneTime(savedCustomer, savedCustomer.getId(), savedCustomer.getId(),
            savedCustomer.getTenantId(), new CustomerId(CustomerId.NULL_UUID),
            tenantAdmin.getId(), tenantAdmin.getEmail(), ActionType.UPDATED);

    Customer foundCustomer = doGet("/api/customer/" + savedCustomer.getId().getId().toString(), Customer.class);
    Assert.assertEquals(foundCustomer.getTitle(), savedCustomer.getTitle());

    doDelete("/api/customer/" + savedCustomer.getId().getId().toString())
            .andExpect(status().isOk());
}
/**
 * Finds a local address suitable for connecting to the given target, trying a
 * sequence of detection strategies with exponential backoff until
 * {@code maxWaitMillis} elapses, then falling back to a heuristic choice and
 * finally to the localhost address.
 *
 * @param targetAddress address we want to be able to connect to; must not be null.
 * @param maxWaitMillis total time budget for the strategy attempts; must be positive.
 * @param startLoggingAfter elapsed millis after which attempts are logged at INFO.
 * @throws IOException if the sleep between attempts is interrupted.
 */
public static InetAddress findConnectingAddress(
        InetSocketAddress targetAddress, long maxWaitMillis, long startLoggingAfter)
        throws IOException {
    if (targetAddress == null) {
        throw new NullPointerException("targetAddress must not be null");
    }
    if (maxWaitMillis <= 0) {
        throw new IllegalArgumentException("Max wait time must be positive");
    }

    final long startTimeNanos = System.nanoTime();

    long currentSleepTime = MIN_SLEEP_TIME;
    long elapsedTimeMillis = 0;

    // Cheapest strategies first; HEURISTIC is reserved for the timeout fallback below.
    final List<AddressDetectionState> strategies =
            Collections.unmodifiableList(
                    Arrays.asList(
                            AddressDetectionState.LOCAL_HOST,
                            AddressDetectionState.ADDRESS,
                            AddressDetectionState.FAST_CONNECT,
                            AddressDetectionState.SLOW_CONNECT));

    // loop while there is time left
    while (elapsedTimeMillis < maxWaitMillis) {
        // Stay quiet at first; switch to INFO logging once attempts have dragged on.
        boolean logging = elapsedTimeMillis >= startLoggingAfter;
        if (logging) {
            LOG.info("Trying to connect to " + targetAddress);
        }

        // Try each strategy in order
        for (AddressDetectionState strategy : strategies) {
            InetAddress address = findAddressUsingStrategy(strategy, targetAddress, logging);
            if (address != null) {
                return address;
            }
        }

        // we have made a pass with all strategies over all interfaces
        // sleep for a while before we make the next pass
        elapsedTimeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000;

        long toWait = Math.min(maxWaitMillis - elapsedTimeMillis, currentSleepTime);
        if (toWait > 0) {
            if (logging) {
                LOG.info("Could not connect. Waiting for {} msecs before next attempt", toWait);
            } else {
                LOG.debug(
                        "Could not connect. Waiting for {} msecs before next attempt", toWait);
            }

            try {
                Thread.sleep(toWait);
            } catch (InterruptedException e) {
                throw new IOException("Connection attempts have been interrupted.");
            }
        }

        // increase the exponential backoff timer
        currentSleepTime = Math.min(2 * currentSleepTime, MAX_SLEEP_TIME);
    }

    // our attempts timed out. use the heuristic fallback
    LOG.warn(
            "Could not connect to {}. Selecting a local address using heuristics.",
            targetAddress);
    InetAddress heuristic =
            findAddressUsingStrategy(AddressDetectionState.HEURISTIC, targetAddress, true);
    if (heuristic != null) {
        return heuristic;
    } else {
        LOG.warn(
                "Could not find any IPv4 address that is not loopback or link-local. Using localhost address.");
        return InetAddress.getLocalHost();
    }
}
/** Against an unreachable target, the heuristic fallback must return the localhost address. */
@Test
void testReturnLocalHostAddressUsingHeuristics() throws Exception {
    // instead of using a unstable localhost:port as "unreachable" to cause Test fails unstably
    // using a Absolutely unreachable outside ip:port
    InetSocketAddress unreachable = new InetSocketAddress("8.8.8.8", 0xFFFF);

    final long start = System.nanoTime();
    InetAddress add = ConnectionUtils.findConnectingAddress(unreachable, 2000, 400);

    // check that it did not take forever (max 30 seconds)
    // this check can unfortunately not be too tight, or it will be flaky on some CI
    // infrastructure
    assertThat(System.nanoTime() - start).isLessThan(30_000_000_000L);

    // we should have found a heuristic address
    assertThat(add).isNotNull();

    // make sure that we returned the InetAddress.getLocalHost as a heuristic
    assertThat(add).isEqualTo(InetAddress.getLocalHost());
}
/**
 * Runs a depth-first search from every still-white vertex and returns the first
 * cycle found, or an empty list when the graph is acyclic.
 *
 * @return the vertices of a detected cycle, or an empty (mutable) list
 */
public List<T> findCycle() {
    resetState();
    for (T start : graph.getVertices()) {
        if (colors.get(start) != WHITE) {
            continue; // already explored from an earlier start vertex
        }
        List<T> path = new ArrayList<>(List.of(start));
        if (!visitDepthFirst(start, path)) {
            continue; // no cycle reachable from this start vertex
        }
        // visitDepthFirst reported a cycle; sanity-check the recorded result.
        if (cycle == null) {
            throw new IllegalStateException("Null cycle - this should never happen");
        }
        if (cycle.isEmpty()) {
            throw new IllegalStateException("Empty cycle - this should never happen");
        }
        log.log(FINE, () -> "Cycle detected: " + cycle);
        return cycle;
    }
    return new ArrayList<>();
}
@Test
void graph_with_cycle_returns_cycle() {
    // A -> B -> C -> A forms a three-vertex cycle.
    var cyclicGraph = new Graph<Vertices>();
    cyclicGraph.edge(A, B);
    cyclicGraph.edge(B, C);
    cyclicGraph.edge(C, A);

    var finder = new CycleFinder<>(cyclicGraph);

    // The reported cycle must contain every vertex of the loop.
    assertTrue(finder.findCycle().containsAll(List.of(A, B, C, A)));
}
/**
 * Lazily renders and caches the formatted message. On first call the raw message is
 * combined with the argument array (if any); subsequent calls return the cached result.
 *
 * @return the formatted message, never recomputed once cached
 */
public String getFormattedMessage() {
    if (formattedMessage != null) {
        return formattedMessage; // already rendered; reuse the cached value
    }
    formattedMessage = (argumentArray == null)
            ? message
            : MessageFormatter.arrayFormat(message, argumentArray).getMessage();
    return formattedMessage;
}
@Test
public void testFormattingTwoArg() {
    final String pattern = "{}-{}";
    final Object[] arguments = new Object[] { 12, 13 };
    LoggingEvent event = new LoggingEvent("", logger, Level.INFO, pattern, null, arguments);

    // Formatting is lazy: nothing is rendered until getFormattedMessage() is called.
    assertNull(event.formattedMessage);
    assertEquals("12-13", event.getFormattedMessage());
}
/**
 * Creates a matcher that checks a {@code DataTable} has the same rows as {@code operand}.
 * The {@code true} constructor flag presumably enables order-insensitive comparison
 * (the shuffled-table test relies on it) — confirm against the matcher's constructor.
 *
 * @param operand the table whose rows are expected
 * @return a matcher comparing row content
 */
public static DataTableHasTheSameRowsAs hasTheSameRowsAs(DataTable operand) {
    return new DataTableHasTheSameRowsAs(operand, true);
}
@Test
void testHasTheSameRowsAs() {
    // A table matches itself and any row-order permutation of itself...
    assertTrue(hasTheSameRowsAs(table).matches(identical));
    assertTrue(hasTheSameRowsAs(table).matches(shuffled));
    // ...but not a table with different content.
    assertFalse(hasTheSameRowsAs(table).matches(different));
}
/**
 * Returns the tag identifying this operation's snapshot-load step.
 *
 * @return the {@code SNAPSHOT_LOAD} constant
 */
@Override
protected String getSnapshotLoadTag() {
    return SNAPSHOT_LOAD;
}
@Test
void testGetSnapshotLoadTag() {
    // The load tag is expected to be "<SimpleClassName>.LOAD".
    final String expected = ServiceMetadataSnapshotOperation.class.getSimpleName() + ".LOAD";
    assertEquals(serviceMetadataSnapshotOperation.getSnapshotLoadTag(), expected);
}
/**
 * Creates a {@code Create.Values} transform producing the given elements.
 * The two absent optionals are presumably the coder and type-descriptor slots,
 * left unset for later inference — confirm against the {@code Values} constructor.
 *
 * @param elems the elements of the resulting collection
 * @return a new {@code Values} transform over {@code elems}
 */
public static <T> Values<T> of(Iterable<T> elems) {
    return new Values<>(elems, Optional.absent(), Optional.absent(), false);
}
@Test
public void testCreateDefaultOutputCoderUsingInference() throws Exception {
    // Register a coder for Record so that coder inference can find it by class.
    Coder<Record> coder = new RecordCoder();
    p.getCoderRegistry().registerCoderForClass(Record.class, coder);

    // Create.of without an explicit coder must pick up the registered one.
    assertThat(
        p.apply(Create.of(new Record(), new Record(), new Record())).getCoder(),
        equalTo(coder));
}
/**
 * Checks whether any proxy-related system property is set for the given protocol,
 * i.e. whether some "&lt;protocol&gt;.&lt;property&gt;" system property is defined.
 *
 * @param protocol the protocol prefix, e.g. "http" or "https"
 * @return true if at least one matching system property is set
 */
@VisibleForTesting
static boolean areProxyPropertiesSet(String protocol) {
    for (String property : PROXY_PROPERTIES) {
        if (System.getProperty(protocol + "." + property) != null) {
            return true; // short-circuit on the first configured property
        }
    }
    return false;
}
@Test
public void testAreProxyPropertiesSet_httpPortSet() {
    // Setting only an http proxy property must flag http as configured, not https.
    System.setProperty("http.proxyPort", "port");
    Assert.assertTrue(MavenSettingsProxyProvider.areProxyPropertiesSet("http"));
    Assert.assertFalse(MavenSettingsProxyProvider.areProxyPropertiesSet("https"));
}
/**
 * Strips scheme and authority from a path, returning only the URI path component.
 * Query and fragment parts are also dropped by {@code URI.getPath()}.
 *
 * @param path the path to strip
 * @return the bare path string, e.g. "/foo/bar" for "hdfs://host:1234/foo/bar?x=y"
 */
public static String getPathWithoutScheme(Path path) {
    return path.toUri().getPath();
}
@Test
public void testGetPathWithoutSchemaFromHDFSURI() {
    final Path hdfsPath =
            new Path(URI.create("hdfs://localhost:1234/foo/bar/baz?please=dont&show=up"));

    // Scheme, authority and query string must all be stripped away.
    final String strippedPath = HadoopUtils.getPathWithoutScheme(hdfsPath);

    assertEquals("/foo/bar/baz", strippedPath);
}
/**
 * Reads the next byte from the in-memory buffer.
 *
 * @return the next byte as an unsigned value (0-255), or -1 at end of stream
 */
@Override
public int read() throws EOFException {
    if (pos >= size) {
        return -1; // end of stream reached
    }
    return data[pos++] & 0xff; // mask to an unsigned byte value
}
// A negative length must be rejected with IndexOutOfBoundsException, per the
// InputStream.read(byte[], int, int) contract.
@Test(expected = IndexOutOfBoundsException.class)
public void testReadForBOffLen_negativeLen() throws Exception {
    in.read(INIT_DATA, 0, -11);
}
/**
 * Reports whether the current ZooKeeper server configuration differs from the
 * desired one. Maps are compared by content via {@code Map.equals}.
 *
 * @param current the configuration currently in effect; must not be null
 * @param desired the configuration that should be in effect
 * @return true when the two configurations are not equal
 */
static boolean isDifferent(Map<String, String> current, Map<String, String> desired) {
    final boolean sameConfiguration = current.equals(desired);
    return !sameConfiguration;
}
@Test
public void testIsDifferent() {
    // Baseline: a three-node ensemble.
    Map<String, String> current = new HashMap<>(3);
    current.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    current.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    current.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");

    // Scale-up: desired has one extra server -> configurations differ.
    Map<String, String> desired = new HashMap<>(3);
    desired.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired.put("server.4", "my-cluster-zookeeper-3.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    assertThat(ZookeeperScaler.isDifferent(current, desired), is(true));

    // Scale-down: desired has fewer servers -> configurations differ.
    Map<String, String> desired2 = new HashMap<>(3);
    desired2.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired2.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    assertThat(ZookeeperScaler.isDifferent(current, desired2), is(true));

    // Same size cannot compensate for a different key set -> configurations differ.
    Map<String, String> desired3 = new HashMap<>(3);
    desired3.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired3.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    assertThat(ZookeeperScaler.isDifferent(current, desired3), is(true));
}
/**
 * Returns the configured end-of-life date.
 *
 * @return the end-of-life date string; never null
 */
@NonNull
public String getEndOfLifeDate() {
    return endOfLifeDate;
}
// Verifies the getter returns the end-of-life date the monitor fixture was built with.
@Test
public void testGetEndOfLifeDate() {
    assertThat(monitor.getEndOfLifeDate(), is("2099-12-31"));
}
/**
 * Recursively populates {@code toReturn} from the fields of a JSON object node.
 * Scalar nodes are evaluated against the declared field type, arrays are turned into
 * lists, and nested objects are instantiated and filled recursively. Empty-string
 * nodes are skipped, leaving the target field untouched.
 *
 * @param json the JSON object whose fields drive the population
 * @param toReturn the target instance to fill and return
 * @param className fully-qualified class name of {@code toReturn}
 * @param genericClasses generic type arguments of {@code className}, if any
 * @return the same {@code toReturn} instance, now populated
 */
protected Object createAndFillObject(ObjectNode json, Object toReturn, String className, List<String> genericClasses) {
    Iterator<Map.Entry<String, JsonNode>> fields = json.fields();
    while (fields.hasNext()) {
        Map.Entry<String, JsonNode> element = fields.next();
        String key = element.getKey();
        JsonNode jsonNode = element.getValue();
        if (isSimpleTypeNode(jsonNode)) {
            // Scalar leaf: evaluate its textual value against the field's declared type.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            setField(toReturn, key, internalLiteralEvaluation(getSimpleTypeNodeTextValue(jsonNode), fieldDescriptor.getKey()));
        } else if (jsonNode.isArray()) {
            // JSON array: build a list, recursing into each element as needed.
            List<Object> nestedList = new ArrayList<>();
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            List<Object> returnedList = createAndFillList((ArrayNode) jsonNode, nestedList, fieldDescriptor.getKey(), fieldDescriptor.getValue());
            setField(toReturn, key, returnedList);
        } else if (jsonNode.isObject()) {
            // Nested JSON object: instantiate the field's declared type and recurse.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            Object nestedObject = createObject(fieldDescriptor.getKey(), fieldDescriptor.getValue());
            Object returnedObject = createAndFillObject((ObjectNode) jsonNode, nestedObject, fieldDescriptor.getKey(), fieldDescriptor.getValue());
            setField(toReturn, key, returnedObject);
        } else if (!isEmptyText(jsonNode)) {
            // Any other non-empty textual node: evaluate it like a scalar.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            setField(toReturn, key, internalLiteralEvaluation(jsonNode.textValue(), fieldDescriptor.getKey()));
        } else {
            // empty strings are skipped
        }
    }
    return toReturn;
}
@Test
public void convertObject_singleLevel() {
    // A flat JSON object with two string properties.
    ObjectNode jsonObject = new ObjectNode(factory);
    jsonObject.put("age", "1");
    jsonObject.put("name", "FS");

    Object converted = expressionEvaluator.createAndFillObject(
            jsonObject,
            new HashMap<>(),
            Map.class.getCanonicalName(),
            List.of(String.class.getCanonicalName()));

    // The result must be a Map carrying exactly the two JSON properties.
    assertThat(converted).isInstanceOf(Map.class);
    Map<String, Object> convertedMap = (Map<String, Object>) converted;
    assertThat(convertedMap).hasSize(2).containsEntry("age", "1").containsEntry("name", "FS");
}
/**
 * Converts the value to a Double by first obtaining its integer representation.
 *
 * @param object the value to convert
 * @return the integer value widened to Double, or null when the integer form is null
 * @throws KettleValueException if the integer conversion fails
 */
@Override
public Double getNumber( Object object ) throws KettleValueException {
  final Long integerValue = getInteger( object );
  return integerValue == null ? null : integerValue.doubleValue();
}
// Smoke test: getNumber must accept a range of valid IPv4 addresses without throwing.
// No return values are asserted; absence of exceptions is the pass criterion.
@Test
public void testGetNumber_Success() throws UnknownHostException, KettleValueException {
    ValueMetaInternetAddress vm = new ValueMetaInternetAddress();
    String[] addresses = {
        // Some IPv4 addresses
        "192.168.10.0", "0.0.0.1", "0.0.0.0", "127.0.0.1", "255.255.0.10", "192.0.2.235" };

    // No exception should be thrown in any of the following calls
    for ( String address : addresses ) {
        InetAddress addr = InetAddress.getByName( address );
        vm.getNumber( addr );
    }
}
/**
 * Resolves the URL other nodes should use to reach this node, delegating to the
 * overload with the class-level default logger.
 *
 * @param portResolver maps a listener URL to the effective port
 * @return the inter-node listener URL
 */
public URL getInterNodeListener(
    final Function<URL, Integer> portResolver
) {
    return getInterNodeListener(portResolver, LOGGER);
}
// An explicitly configured advertised listener must be returned verbatim, even when
// its host cannot be resolved — no resolution or fallback should be attempted.
@Test
public void shouldUseExplicitInterNodeListenerSetToUnresolvableHost() {
    // Given:
    final URL expected = url("https://unresolvable.host:12345");

    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .putAll(MIN_VALID_CONFIGS)
        .put(ADVERTISED_LISTENER_CONFIG, expected.toString())
        .build()
    );

    // When:
    final URL actual = config.getInterNodeListener(portResolver, logger);

    // Then:
    assertThat(actual, is(expected));
    // The chosen listener must be logged exactly once, with nothing else.
    verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG);
    verifyNoMoreInteractions(logger);
}
/**
 * Returns a new path whose final name component is this path's name with
 * {@code suffix} appended. The parent directory is unchanged.
 *
 * @param suffix the string to append to the last path component
 * @return the suffixed path
 */
public Path suffix(String suffix) {
    final Path parent = getParent();
    final String suffixedName = getName() + suffix;
    return new Path(parent, suffixedName);
}
@Test
void testSuffix() {
    // Plain absolute path: suffix is appended to the last component.
    Path path = new Path("/my/path").suffix("_123");
    assertThat(path.toUri().getPath()).isEqualTo("/my/path_123");

    // Trailing slash on the original path.
    path = new Path("/my/path/").suffix("/abc");
    assertThat(path.toUri().getPath()).isEqualTo("/my/path/abc");

    // Windows-style path with a drive letter.
    path = new Path("C:/my/windows/path").suffix("/abc");
    assertThat(path.toUri().getPath()).isEqualTo("/C:/my/windows/path/abc");
}
/**
 * Releases all conductor resources. Teardown order matters: the async task executor
 * is stopped first so no background task touches resources being freed, then the
 * name resolver is closed, images/publications are freed, the driver heartbeat is
 * invalidated, and finally the CnC buffer is forced to disk and the context closed.
 */
public void onClose() {
    // Stop the async task executor before freeing anything it might still use.
    if (asyncTaskExecutor instanceof ExecutorService) {
        try {
            final ExecutorService executor = (ExecutorService)asyncTaskExecutor;
            executor.shutdownNow();
            if (!executor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
                // Timed out waiting: report it, but continue the teardown regardless.
                ctx.errorHandler().onError(new AeronEvent("failed to shutdown async task executor"));
            }
        } catch (final Exception e) {
            ctx.errorHandler().onError(e);
        }
    }

    CloseHelper.close(ctx.errorHandler(), nameResolver);

    // Free buffers held by images and all publication types.
    publicationImages.forEach(PublicationImage::free);
    networkPublications.forEach(NetworkPublication::free);
    ipcPublications.forEach(IpcPublication::free);
    freeEndOfLifeResources(Integer.MAX_VALUE);

    // Invalidate the driver heartbeat so clients can detect the shutdown.
    toDriverCommands.consumerHeartbeatTime(Aeron.NULL_VALUE);
    // Force the CnC file contents to storage before closing the context.
    ctx.cncByteBuffer().force();
    ctx.close();
}
// onClose() must shut the async executor down in the documented order:
// shutdownNow() first, then awaitTermination() with the configured timeout.
@Test
void onCloseMustShutdownAsyncExecutor(@TempDir final Path dir) throws InterruptedException {
    final ExecutorService asyncTaskExecutor = mock(ExecutorService.class);
    final DriverConductor conductor = new DriverConductor(ctx.clone()
        .cncByteBuffer(IoUtil.mapNewFile(dir.resolve("some.txt").toFile(), 64))
        .asyncTaskExecutor(asyncTaskExecutor));

    conductor.onClose();

    final InOrder inOrder = inOrder(asyncTaskExecutor);
    inOrder.verify(asyncTaskExecutor).shutdownNow();
    inOrder.verify(asyncTaskExecutor).awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, SECONDS);
    inOrder.verifyNoMoreInteractions();
}
/**
 * Returns the platform cache directory, reading the live system properties and
 * environment. Delegates to the testable overload that takes them explicitly.
 */
public static Path getCacheHome() {
    return getCacheHome(System.getProperties(), System.getenv());
}
// On macOS ("mac"/"darwin" matched case-insensitively in os.name, per the fake value
// below) the cache home must resolve to ~/Library/Caches/Google/Jib.
@Test
public void testGetCacheHome_mac() throws IOException {
    Path libraryApplicationSupport = Paths.get(fakeCacheHome, "Library", "Caches");
    Files.createDirectories(libraryApplicationSupport);

    Properties fakeProperties = new Properties();
    fakeProperties.setProperty("user.home", fakeCacheHome);
    fakeProperties.setProperty("os.name", "os is mAc or DaRwIn");

    Assert.assertEquals(
        libraryApplicationSupport.resolve("Google").resolve("Jib"),
        XdgDirectories.getCacheHome(fakeProperties, Collections.emptyMap()));
}
/**
 * Records a system error hit for the aggregated request name and appends it to the
 * bounded error list.
 *
 * @param requestName the name of the failing request; must not be null
 * @param duration the request duration in ms, or -1 for the log counter
 * @param cpuTime the CPU time in ms, or -1 when unknown
 * @param allocatedKBytes the allocated memory in KB
 * @param stackTrace the associated stack trace, may be null
 */
public void addRequestForSystemError(String requestName, long duration, int cpuTime,
        int allocatedKBytes, String stackTrace) {
    // Like addRequest, this method is deliberately not synchronized on this instance:
    // a global application-wide lock on this object could cause contention and
    // slowdowns. Instead, "request" and "errors" are synchronized individually below.
    assert requestName != null;
    assert duration >= -1; // -1 is used by the log counter
    assert cpuTime >= -1;
    // Stack traces are only kept for the error and log counters, which are bounded
    // in size; otherwise counters could grow too large in memory and on disk.
    assert errorCounter;
    // The code below assumes there is no current context for system errors,
    // unlike the addRequest method.
    assert contextThreadLocal.get() == null;
    final String aggregateRequestName = getAggregateRequestName(requestName);
    final CounterRequest request = getCounterRequestInternal(aggregateRequestName);
    synchronized (request) {
        request.addHit(duration, cpuTime, allocatedKBytes, true, stackTrace, -1);
    }
    synchronized (errors) {
        errors.addLast(new CounterError(requestName, stackTrace));
        if (errors.size() > MAX_ERRORS_COUNT) {
            // Keep the error list bounded: drop the oldest entry.
            errors.removeFirst();
        }
    }
}
// Exercises system-error recording: aggregation of repeated requests, the
// MAX_ERRORS_COUNT bound, and the context-based error paths (including the
// no-current-context case at the end).
@Test
public void testAddRequestForSystemError() {
    final CounterRequest request = createCounterRequest();
    final Counter errorCounter = new Counter(Counter.ERROR_COUNTER_NAME, null);
    errorCounter.setMaxRequestsCount(200);
    errorCounter.addRequestForSystemError(request.getName(), request.getMean(), 0, 0, null);
    final List<CounterRequest> before = errorCounter.getOrderedRequests();
    errorCounter.addRequestForSystemError(request.getName(), request.getMean(), 0, 0, "stacktrace");
    final List<CounterRequest> after = errorCounter.getOrderedRequests();
    after.get(0).removeHits(request);
    // The CounterRequest contents are compared via their toString output.
    assertEquals("error requests", before.toString(), after.toString());
    // Fill the counter up to its error cap, then one more past it.
    int i = 0;
    while (errorCounter.getRequestsCount() < Counter.MAX_ERRORS_COUNT) {
        errorCounter.addRequestForSystemError("request a" + i, 1, 0, 0, null);
        i++;
    }
    errorCounter.addRequestForSystemError("request a" + i, 1, 0, 0, null);
    errorCounter.clear();
    // Same exercise through the bound-context API.
    i = 0;
    while (errorCounter.getRequestsCount() < Counter.MAX_ERRORS_COUNT) {
        errorCounter.bindContextIncludingCpu("request b" + i);
        errorCounter.addRequestForCurrentContext("stack trace");
        i++;
    }
    errorCounter.bindContextIncludingCpu("request b" + i);
    errorCounter.addRequestForCurrentContext("stack trace");
    // addRequestForCurrentContext without a bound current context.
    errorCounter.addRequestForCurrentContext("stack trace");
    errorCounter.addRequestForCurrentContext(true);
}
/**
 * Matches the crash log against every known {@code Rule} and returns one
 * {@code Result} per rule whose pattern is found in the log.
 *
 * <p>NOTE: the method name is misspelled ("anaylze") but is part of the public API
 * and referenced by callers, so it must not be renamed here.
 *
 * @param log the raw crash log text
 * @return the set of matched results; empty when no rule applies
 */
public static Set<Result> anaylze(String log) {
    final Set<Result> matched = new HashSet<>();
    for (Rule candidate : Rule.values()) {
        final Matcher matcher = candidate.pattern.matcher(log);
        if (matcher.find()) {
            matched.add(new Result(candidate, log, matcher));
        }
    }
    return matched;
}
// The previous version assigned findResultByRule(...) to an unused local variable
// and asserted nothing; the dead local has been removed. The lookup itself is the
// check — presumably findResultByRule fails the test when the rule is absent
// (TODO confirm against its implementation, or add an explicit assertion there).
// NOTE(review): the method name says "Forge1" but the fixture file is
// "..._forge2.txt" — confirm which log this case is meant to cover.
@Test
public void optifineIsNotCompatibleWithForge1() throws IOException {
    findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/logs/optifine_is_not_compatible_with_forge2.txt")),
            CrashReportAnalyzer.Rule.OPTIFINE_IS_NOT_COMPATIBLE_WITH_FORGE);
}
/**
 * POSTs {@code params} serialized to a JSON body at {@code url}, using the default
 * connect and socket timeouts.
 *
 * @param url the target URL
 * @param params the request parameters, serialized with {@code JSON.toJSONString}
 * @return the response body as a string
 * @throws Exception if the request fails
 */
public static String post(String url, Map<String, Object> params) throws Exception {
    return post(url, JSON.toJSONString(params), CONNECT_TIMEOUT_DEFAULT_IN_MILL, SOCKET_TIMEOUT_DEFAULT_IN_MILL);
}
// NOTE(review): this test depends on the external httpbin.org service and will fail
// without network access — consider a local stub server for CI stability.
@Test
public void testSimpleCase() throws Exception {
    String url = "https://httpbin.org/post";
    Map<String, Object> params = new HashMap<String, Object>();
    params.put("foo", "bar");
    String rsp = HttpUtils.post(url, params);
    System.out.println(rsp);
    // Only the presence of a response body is asserted, not its content.
    Assert.assertNotNull(rsp);
}
/**
 * Looks up an execution by id within the caller's resolved tenant.
 * Returns {@code null} when no execution is found — presumably translated to an
 * HTTP 404 by the framework's null handling (confirm against the controller config).
 *
 * @param executionId the execution id
 * @return the execution, or null when absent
 */
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/{executionId}")
@Operation(tags = {"Executions"}, summary = "Get an execution")
public Execution get(
    @Parameter(description = "The execution id") @PathVariable String executionId
) {
    return executionRepository
        .findById(tenantService.resolveTenant(), executionId)
        .orElse(null);
}
// Triggering a webhook whose key is resolved dynamically must still start an execution.
@Test
void webhookDynamicKey() {
    Execution execution = client.toBlocking().retrieve(
        GET(
            "/api/v1/executions/webhook/" + TESTS_FLOW_NS + "/webhook-dynamic-key/webhook-dynamic-key"
        ),
        Execution.class
    );

    assertThat(execution, notNullValue());
    assertThat(execution.getId(), notNullValue());
}
/**
 * Returns partition metadata for the given topic, blocking up to
 * {@code maxBlockTimeMs} while waiting for the metadata to become available.
 *
 * @param topic the topic to query; must not be null
 * @return the partitions of the topic
 * @throws NullPointerException if {@code topic} is null
 * @throws InterruptException if the calling thread is interrupted while waiting
 *         (NOTE(review): relies on InterruptException to restore the interrupt
 *         flag — confirm against its constructor)
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    Objects.requireNonNull(topic, "topic cannot be null");
    try {
        return waitOnMetadata(topic, null, time.milliseconds(), maxBlockTimeMs).cluster.partitionsForTopic(topic);
    } catch (InterruptedException e) {
        throw new InterruptException(e);
    }
}
@Test
public void testPartitionsForWithNullTopic() {
    Properties config = new Properties();
    config.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    try (KafkaProducer<byte[], byte[]> producer =
             new KafkaProducer<>(config, new ByteArraySerializer(), new ByteArraySerializer())) {
        // A null topic must be rejected up front with a NullPointerException.
        assertThrows(NullPointerException.class, () -> producer.partitionsFor(null));
    }
}
/**
 * Creates a builder for a slot sharing group with the given name.
 *
 * @param name the name of the slot sharing group
 * @return a new builder instance
 */
public static Builder newBuilder(String name) {
    return new Builder(name);
}
// Building with only a partial resource spec (CPU and task off-heap set, but other
// required resources missing) must be rejected with IllegalArgumentException.
@Test
void testBuildSlotSharingGroupWithoutAllRequiredConfig() {
    assertThatThrownBy(
            () ->
                SlotSharingGroup.newBuilder("ssg")
                    .setCpuCores(1)
                    .setTaskOffHeapMemoryMB(10)
                    .build())
        .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Retrieves and removes the head of this queue, waiting up to {@code timeout} if
 * necessary. Blocks the calling thread on the result of the async poll.
 *
 * @param timeout how long to wait before giving up
 * @param unit the time unit of {@code timeout}
 * @return the head of the queue, or null if the timeout elapsed
 * @throws InterruptedException if interrupted while waiting
 */
@Override
public V poll(long timeout, TimeUnit unit) throws InterruptedException {
    return commandExecutor.getInterrupted(pollAsync(timeout, unit));
}
// poll() must return an available element promptly, and block for (roughly) the
// full timeout before returning null on an empty queue.
// NOTE(review): the strict "> 5000" wall-clock check can be flaky if poll returns
// at exactly the timeout boundary — consider a small tolerance.
@Test
public void testPoll() throws InterruptedException {
    RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("queue1");
    assertThat(queue1.trySetCapacity(10)).isTrue();
    queue1.put(1);
    Assertions.assertEquals((Integer)1, queue1.poll(2, TimeUnit.SECONDS));

    long s = System.currentTimeMillis();
    Assertions.assertNull(queue1.poll(5, TimeUnit.SECONDS));
    Assertions.assertTrue(System.currentTimeMillis() - s > 5000);
}
/**
 * Dispatches an admin request to the handler matching its request code.
 * This is a pure routing switch; each case delegates to a dedicated handler method.
 * Unknown codes fall through to {@code getUnknownCmdResponse}.
 *
 * @param ctx the channel context of the incoming request
 * @param request the remoting command to process
 * @return the handler's response command
 * @throws RemotingCommandException if a handler fails to decode/process the command
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    switch (request.getCode()) {
        // --- Topic management ---
        case RequestCode.UPDATE_AND_CREATE_TOPIC:
            return this.updateAndCreateTopic(ctx, request);
        case RequestCode.UPDATE_AND_CREATE_TOPIC_LIST:
            return this.updateAndCreateTopicList(ctx, request);
        case RequestCode.DELETE_TOPIC_IN_BROKER:
            return this.deleteTopic(ctx, request);
        case RequestCode.GET_ALL_TOPIC_CONFIG:
            return this.getAllTopicConfig(ctx, request);
        // --- Timer ---
        case RequestCode.GET_TIMER_CHECK_POINT:
            return this.getTimerCheckPoint(ctx, request);
        case RequestCode.GET_TIMER_METRICS:
            return this.getTimerMetrics(ctx, request);
        // --- Broker config ---
        case RequestCode.UPDATE_BROKER_CONFIG:
            return this.updateBrokerConfig(ctx, request);
        case RequestCode.GET_BROKER_CONFIG:
            return this.getBrokerConfig(ctx, request);
        // --- Cold data flow control ---
        case RequestCode.UPDATE_COLD_DATA_FLOW_CTR_CONFIG:
            return this.updateColdDataFlowCtrGroupConfig(ctx, request);
        case RequestCode.REMOVE_COLD_DATA_FLOW_CTR_CONFIG:
            return this.removeColdDataFlowCtrGroupConfig(ctx, request);
        case RequestCode.GET_COLD_DATA_FLOW_CTR_INFO:
            return this.getColdDataFlowCtrInfo(ctx);
        case RequestCode.SET_COMMITLOG_READ_MODE:
            return this.setCommitLogReadaheadMode(ctx, request);
        // --- Offsets and store info ---
        case RequestCode.SEARCH_OFFSET_BY_TIMESTAMP:
            return this.searchOffsetByTimestamp(ctx, request);
        case RequestCode.GET_MAX_OFFSET:
            return this.getMaxOffset(ctx, request);
        case RequestCode.GET_MIN_OFFSET:
            return this.getMinOffset(ctx, request);
        case RequestCode.GET_EARLIEST_MSG_STORETIME:
            return this.getEarliestMsgStoretime(ctx, request);
        case RequestCode.GET_BROKER_RUNTIME_INFO:
            return this.getBrokerRuntimeInfo(ctx, request);
        // --- Message queue locking ---
        case RequestCode.LOCK_BATCH_MQ:
            return this.lockBatchMQ(ctx, request);
        case RequestCode.UNLOCK_BATCH_MQ:
            return this.unlockBatchMQ(ctx, request);
        // --- Subscription groups ---
        case RequestCode.UPDATE_AND_CREATE_SUBSCRIPTIONGROUP:
            return this.updateAndCreateSubscriptionGroup(ctx, request);
        case RequestCode.GET_ALL_SUBSCRIPTIONGROUP_CONFIG:
            return this.getAllSubscriptionGroup(ctx, request);
        case RequestCode.DELETE_SUBSCRIPTIONGROUP:
            return this.deleteSubscriptionGroup(ctx, request);
        // --- Stats and connections ---
        case RequestCode.GET_TOPIC_STATS_INFO:
            return this.getTopicStatsInfo(ctx, request);
        case RequestCode.GET_CONSUMER_CONNECTION_LIST:
            return this.getConsumerConnectionList(ctx, request);
        case RequestCode.GET_PRODUCER_CONNECTION_LIST:
            return this.getProducerConnectionList(ctx, request);
        case RequestCode.GET_ALL_PRODUCER_INFO:
            return this.getAllProducerInfo(ctx, request);
        case RequestCode.GET_CONSUME_STATS:
            return this.getConsumeStats(ctx, request);
        case RequestCode.GET_ALL_CONSUMER_OFFSET:
            return this.getAllConsumerOffset(ctx, request);
        case RequestCode.GET_ALL_DELAY_OFFSET:
            return this.getAllDelayOffset(ctx, request);
        case RequestCode.GET_ALL_MESSAGE_REQUEST_MODE:
            return this.getAllMessageRequestMode(ctx, request);
        // --- Consumer control ---
        case RequestCode.INVOKE_BROKER_TO_RESET_OFFSET:
            return this.resetOffset(ctx, request);
        case RequestCode.INVOKE_BROKER_TO_GET_CONSUMER_STATUS:
            return this.getConsumerStatus(ctx, request);
        case RequestCode.QUERY_TOPIC_CONSUME_BY_WHO:
            return this.queryTopicConsumeByWho(ctx, request);
        case RequestCode.QUERY_TOPICS_BY_CONSUMER:
            return this.queryTopicsByConsumer(ctx, request);
        case RequestCode.QUERY_SUBSCRIPTION_BY_CONSUMER:
            return this.querySubscriptionByConsumer(ctx, request);
        case RequestCode.QUERY_CONSUME_TIME_SPAN:
            return this.queryConsumeTimeSpan(ctx, request);
        case RequestCode.GET_SYSTEM_TOPIC_LIST_FROM_BROKER:
            return this.getSystemTopicListFromBroker(ctx, request);
        // --- Maintenance (note: these three ignore ctx/request entirely) ---
        case RequestCode.CLEAN_EXPIRED_CONSUMEQUEUE:
            return this.cleanExpiredConsumeQueue();
        case RequestCode.DELETE_EXPIRED_COMMITLOG:
            return this.deleteExpiredCommitLog();
        case RequestCode.CLEAN_UNUSED_TOPIC:
            return this.cleanUnusedTopic();
        case RequestCode.GET_CONSUMER_RUNNING_INFO:
            return this.getConsumerRunningInfo(ctx, request);
        case RequestCode.QUERY_CORRECTION_OFFSET:
            return this.queryCorrectionOffset(ctx, request);
        case RequestCode.CONSUME_MESSAGE_DIRECTLY:
            return this.consumeMessageDirectly(ctx, request);
        case RequestCode.CLONE_GROUP_OFFSET:
            return this.cloneGroupOffset(ctx, request);
        case RequestCode.VIEW_BROKER_STATS_DATA:
            return ViewBrokerStatsData(ctx, request);
        case RequestCode.GET_BROKER_CONSUME_STATS:
            return fetchAllConsumeStatsInBroker(ctx, request);
        case RequestCode.QUERY_CONSUME_QUEUE:
            return queryConsumeQueue(ctx, request);
        case RequestCode.UPDATE_AND_GET_GROUP_FORBIDDEN:
            return this.updateAndGetGroupForbidden(ctx, request);
        case RequestCode.GET_SUBSCRIPTIONGROUP_CONFIG:
            return this.getSubscriptionGroup(ctx, request);
        // --- ACL configuration ---
        case RequestCode.UPDATE_AND_CREATE_ACL_CONFIG:
            return updateAndCreateAccessConfig(ctx, request);
        case RequestCode.DELETE_ACL_CONFIG:
            return deleteAccessConfig(ctx, request);
        case RequestCode.GET_BROKER_CLUSTER_ACL_INFO:
            return getBrokerAclConfigVersion(ctx, request);
        case RequestCode.UPDATE_GLOBAL_WHITE_ADDRS_CONFIG:
            return updateGlobalWhiteAddrsConfig(ctx, request);
        case RequestCode.RESUME_CHECK_HALF_MESSAGE:
            return resumeCheckHalfMessage(ctx, request);
        case RequestCode.GET_TOPIC_CONFIG:
            return getTopicConfig(ctx, request);
        case RequestCode.UPDATE_AND_CREATE_STATIC_TOPIC:
            return this.updateAndCreateStaticTopic(ctx, request);
        // --- HA / controller ---
        case RequestCode.NOTIFY_MIN_BROKER_ID_CHANGE:
            return this.notifyMinBrokerIdChange(ctx, request);
        case RequestCode.EXCHANGE_BROKER_HA_INFO:
            return this.updateBrokerHaInfo(ctx, request);
        case RequestCode.GET_BROKER_HA_STATUS:
            return this.getBrokerHaStatus(ctx, request);
        case RequestCode.RESET_MASTER_FLUSH_OFFSET:
            return this.resetMasterFlushOffset(ctx, request);
        case RequestCode.GET_BROKER_EPOCH_CACHE:
            return this.getBrokerEpochCache(ctx, request);
        case RequestCode.NOTIFY_BROKER_ROLE_CHANGED:
            return this.notifyBrokerRoleChanged(ctx, request);
        // --- Auth: users ---
        case RequestCode.AUTH_CREATE_USER:
            return this.createUser(ctx, request);
        case RequestCode.AUTH_UPDATE_USER:
            return this.updateUser(ctx, request);
        case RequestCode.AUTH_DELETE_USER:
            return this.deleteUser(ctx, request);
        case RequestCode.AUTH_GET_USER:
            return this.getUser(ctx, request);
        case RequestCode.AUTH_LIST_USER:
            return this.listUser(ctx, request);
        // --- Auth: ACLs ---
        case RequestCode.AUTH_CREATE_ACL:
            return this.createAcl(ctx, request);
        case RequestCode.AUTH_UPDATE_ACL:
            return this.updateAcl(ctx, request);
        case RequestCode.AUTH_DELETE_ACL:
            return this.deleteAcl(ctx, request);
        case RequestCode.AUTH_GET_ACL:
            return this.getAcl(ctx, request);
        case RequestCode.AUTH_LIST_ACL:
            return this.listAcl(ctx, request);
        default:
            return getUnknownCmdResponse(ctx, request);
    }
}
// UPDATE_BROKER_CONFIG must accept whitelisted keys but refuse any key on the
// configured black list (including attempts to change the black list itself).
@Test
public void testProcessRequest_UpdateConfigPath() throws RemotingCommandException {
    final RemotingCommand updateConfigRequest = RemotingCommand.createRequestCommand(RequestCode.UPDATE_BROKER_CONFIG, null);
    Properties properties = new Properties();

    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.channel()).thenReturn(null);

    // Update allowed value
    properties.setProperty("allAckInSyncStateSet", "true");
    updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));

    RemotingCommand response = adminBrokerProcessor.processRequest(ctx, updateConfigRequest);

    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);

    //update disallowed value
    properties.clear();
    properties.setProperty("brokerConfigPath", "test/path");
    updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));

    response = adminBrokerProcessor.processRequest(ctx, updateConfigRequest);

    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.NO_PERMISSION);
    assertThat(response.getRemark()).contains("Can not update config in black list.");

    //update disallowed value
    properties.clear();
    properties.setProperty("configBlackList", "test;path");
    updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));

    response = adminBrokerProcessor.processRequest(ctx, updateConfigRequest);

    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.NO_PERMISSION);
    assertThat(response.getRemark()).contains("Can not update config in black list.");
}
/**
 * Returns the {@code ShenyuSdkClient} registered for {@code clientType}, creating
 * and caching it on first use via the SPI {@code ExtensionLoader}. Subsequent calls
 * with the same type return the cached instance.
 *
 * @param clientType the SPI join name of the client implementation
 * @return the (cached) client instance
 */
public static ShenyuSdkClient newInstance(final String clientType) {
    return SDK_CLIENT_MAP.computeIfAbsent(clientType, ExtensionLoader.getExtensionLoader(ShenyuSdkClient.class)::getJoin);
}
// Verifies both the real SPI lookup ("httpclient") and, via a mocked
// ExtensionLoader, that an arbitrary client type is resolved through getJoin.
@Test
public void testNewInstance() {
    assertNotNull(ShenyuSdkClientFactory.newInstance("httpclient"));
    try (MockedStatic<ExtensionLoader> mocked = mockStatic(ExtensionLoader.class)) {
        ExtensionLoader extensionLoader = mock(ExtensionLoader.class);
        mocked.when(() -> ExtensionLoader.getExtensionLoader(ShenyuSdkClient.class))
                .thenReturn(extensionLoader);
        when(extensionLoader.getJoin("clientType")).thenReturn(
                mock(ShenyuSdkClient.class));
        assertNotNull(ShenyuSdkClientFactory.newInstance("clientType"));
    }
}