focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Asynchronously queries messages from the broker at {@code address}.
 *
 * @param uniqueKeyFlag  whether the query is by unique message key (forwarded to the broker
 *                       via the {@code UNIQUE_MSG_QUERY_FLAG} ext field and used when filtering)
 * @param decompressBody whether decoded message bodies are decompressed
 * @return future completing with the matching messages; empty list on QUERY_NOT_FOUND,
 *         exceptionally with {@link MQClientException} on any other response code,
 *         and exceptionally with the transport error if the remoting call itself fails
 */
@Override
public CompletableFuture<List<MessageExt>> queryMessage(String address, boolean uniqueKeyFlag, boolean decompressBody,
    QueryMessageRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<List<MessageExt>> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_MESSAGE, requestHeader);
    request.addExtField(MixAll.UNIQUE_MSG_QUERY_FLAG, String.valueOf(uniqueKeyFlag));
    // FIX: use whenComplete instead of thenAccept — with thenAccept a failed invoke
    // (timeout, connect error) left `future` uncompleted forever, hanging the caller.
    remotingClient.invoke(address, request, timeoutMillis).whenComplete((response, throwable) -> {
        if (throwable != null) {
            future.completeExceptionally(throwable);
            return;
        }
        if (response.getCode() == ResponseCode.SUCCESS) {
            List<MessageExt> wrappers = MessageDecoder.decodesBatch(ByteBuffer.wrap(response.getBody()), true, decompressBody, true);
            // Broker may return a superset; filter by topic/key on the client side.
            future.complete(filterMessages(wrappers, requestHeader.getTopic(), requestHeader.getKey(), uniqueKeyFlag));
        } else if (response.getCode() == ResponseCode.QUERY_NOT_FOUND) {
            // "Not found" is a normal outcome: complete with an empty list, not an error.
            List<MessageExt> wrappers = new ArrayList<>();
            future.complete(wrappers);
        } else {
            log.warn("queryMessage getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader);
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    });
    return future;
}
// Verifies that a SUCCESS broker response is decoded into exactly one message.
@Test
public void assertQueryMessageWithSuccess() throws Exception {
    // Given: a stubbed SUCCESS response and a mocked request header.
    setResponseSuccess(getMessageResult());
    QueryMessageRequestHeader requestHeader = mock(QueryMessageRequestHeader.class);
    when(requestHeader.getTopic()).thenReturn(defaultTopic);
    when(requestHeader.getKey()).thenReturn("keys");
    // When: querying (uniqueKeyFlag=false, decompressBody=false).
    CompletableFuture<List<MessageExt>> actual = mqClientAdminImpl.queryMessage(defaultBrokerAddr, false, false, requestHeader, defaultTimeout);
    List<MessageExt> messageExtList = actual.get();
    // Then: one decoded message is returned.
    assertNotNull(messageExtList);
    assertEquals(1, messageExtList.size());
}
/**
 * Builds the set of API versions advertised by a broker, intersecting forwardable APIs
 * with the versions supported by the active controller.
 *
 * @param listenerType                  listener whose API set is considered
 * @param minRecordVersion              minimum record (magic) version in use; APIs requiring a
 *                                      newer inter-broker magic are skipped
 * @param activeControllerApiVersions   versions supported by the active controller, keyed by API
 * @param enableUnstableLastVersion     whether unstable latest versions are exposed
 * @param clientTelemetryEnabled        whether telemetry APIs are advertised
 * @return the resulting version collection
 */
public static ApiVersionCollection intersectForwardableApis(
    final ApiMessageType.ListenerType listenerType,
    final RecordVersion minRecordVersion,
    final Map<ApiKeys, ApiVersion> activeControllerApiVersions,
    boolean enableUnstableLastVersion,
    boolean clientTelemetryEnabled
) {
    ApiVersionCollection apiKeys = new ApiVersionCollection();
    for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) {
        if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
            final Optional<ApiVersion> brokerApiVersion = apiKey.toApiVersion(enableUnstableLastVersion);
            if (!brokerApiVersion.isPresent()) {
                // Broker does not support this API key.
                continue;
            }
            // Skip telemetry APIs if client telemetry is disabled.
            if ((apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY) && !clientTelemetryEnabled)
                continue;
            final ApiVersion finalApiVersion;
            if (!apiKey.forwardable) {
                // Non-forwardable APIs are served locally: broker's own range applies.
                finalApiVersion = brokerApiVersion.get();
            } else {
                // Forwardable APIs must be supported by BOTH broker and controller.
                Optional<ApiVersion> intersectVersion = intersect(
                    brokerApiVersion.get(),
                    activeControllerApiVersions.getOrDefault(apiKey, null)
                );
                if (intersectVersion.isPresent()) {
                    finalApiVersion = intersectVersion.get();
                } else {
                    // Controller doesn't support this API key, or there is no intersection.
                    continue;
                }
            }
            // duplicate() because the collection takes ownership of the entry.
            apiKeys.add(finalApiVersion.duplicate());
        }
    }
    return apiKeys;
}
// Forwardable APIs (CREATE_ACLS) must intersect with the controller's range;
// non-forwardable APIs (JOIN_GROUP) keep the broker's full local range.
@Test
public void shouldHaveCommonlyAgreedApiVersionResponseWithControllerOnForwardableAPIs() {
    final ApiKeys forwardableAPIKey = ApiKeys.CREATE_ACLS;
    final ApiKeys nonForwardableAPIKey = ApiKeys.JOIN_GROUP;
    final short minVersion = 0;
    final short maxVersion = 1;
    Map<ApiKeys, ApiVersion> activeControllerApiVersions = Utils.mkMap(
        Utils.mkEntry(forwardableAPIKey, new ApiVersion()
            .setApiKey(forwardableAPIKey.id)
            .setMinVersion(minVersion)
            .setMaxVersion(maxVersion)),
        Utils.mkEntry(nonForwardableAPIKey, new ApiVersion()
            .setApiKey(nonForwardableAPIKey.id)
            .setMinVersion(minVersion)
            .setMaxVersion(maxVersion))
    );
    ApiVersionCollection commonResponse = ApiVersionsResponse.intersectForwardableApis(
        ApiMessageType.ListenerType.ZK_BROKER,
        RecordVersion.current(),
        activeControllerApiVersions,
        true,
        false
    );
    // Forwardable: clamped to the controller's [0, 1] range.
    verifyVersions(forwardableAPIKey.id, minVersion, maxVersion, commonResponse);
    // Non-forwardable: broker-local range, controller entry ignored.
    verifyVersions(nonForwardableAPIKey.id, ApiKeys.JOIN_GROUP.oldestVersion(), ApiKeys.JOIN_GROUP.latestVersion(), commonResponse);
}
@Override public DataflowMapTaskExecutor create( MutableNetwork<Node, Edge> network, PipelineOptions options, String stageName, ReaderFactory readerFactory, SinkFactory sinkFactory, DataflowExecutionContext<?> executionContext, CounterSet counterSet, IdGenerator idGenerator) { // Swap out all the InstructionOutput nodes with OutputReceiver nodes Networks.replaceDirectedNetworkNodes( network, createOutputReceiversTransform(stageName, counterSet)); // Swap out all the ParallelInstruction nodes with Operation nodes Networks.replaceDirectedNetworkNodes( network, createOperationTransformForParallelInstructionNodes( stageName, network, options, readerFactory, sinkFactory, executionContext)); // Collect all the operations within the network and attach all the operations as receivers // to preceding output receivers. List<Operation> topoSortedOperations = new ArrayList<>(); for (OperationNode node : Iterables.filter(Networks.topologicalOrder(network), OperationNode.class)) { topoSortedOperations.add(node.getOperation()); for (Node predecessor : Iterables.filter(network.predecessors(node), OutputReceiverNode.class)) { ((OutputReceiverNode) predecessor) .getOutputReceiver() .addOutput((Receiver) node.getOperation()); } } if (LOG.isDebugEnabled()) { LOG.info("Map task network: {}", Networks.toDot(network)); } return IntrinsicMapTaskExecutor.withSharedCounterSet( topoSortedOperations, counterSet, executionContext.getExecutionStateTracker()); }
// End-to-end check that executing a map task registers a step context per ParDo
// instruction in the BatchModeExecutionContext.
@Test
public void testExecutionContextPlumbing() throws Exception {
    List<ParallelInstruction> instructions =
        Arrays.asList(
            createReadInstruction("Read", ReaderFactoryTest.SingletonTestReaderFactory.class),
            createParDoInstruction(0, 0, "DoFn1", "DoFnUserName"),
            createParDoInstruction(1, 0, "DoFnWithContext", "DoFnWithContextUserName"));
    MapTask mapTask = new MapTask();
    mapTask.setStageName(STAGE);
    mapTask.setInstructions(instructions);
    mapTask.setFactory(Transport.getJsonFactory());
    BatchModeExecutionContext context = BatchModeExecutionContext.forTesting(options, counterSet, "testStage");
    // try-with-resources ensures the executor is closed even if execute() throws.
    try (DataflowMapTaskExecutor executor =
        mapTaskExecutorFactory.create(
            mapTaskToNetwork.apply(mapTask),
            options,
            STAGE,
            readerRegistry,
            sinkRegistry,
            context,
            counterSet,
            idGenerator)) {
        executor.execute();
    }
    // Each ParDo should have produced a step context with its system name.
    List<String> stepNames = new ArrayList<>();
    for (BatchModeExecutionContext.StepContext stepContext : context.getAllStepContexts()) {
        stepNames.add(stepContext.getNameContext().systemName());
    }
    assertThat(stepNames, hasItems("DoFn1", "DoFnWithContext"));
}
/**
 * Returns the JSON value at {@code path} as a {@code List}.
 * NOTE(review): thin delegation to {@code get(path)}; the List cast is unchecked and a
 * non-list value at the path would surface as a ClassCastException at the call site.
 */
public <T> List<T> getList(String path) {
    return get(path);
}
// "class" is a Groovy/GPath reserved word; this test shows querying that property
// via the explicit getAt('class') escape instead of plain dot notation.
@Test
public void can_manually_escape_class_property() {
    // Given
    String json = "{\n" +
        "  \"semester\": \"Fall 2015\",\n" +
        "  \"groups\": [\n" +
        "    {\n" +
        "      \"siteUrl\": \"http://cphbusinessjb.cloudapp.net/CA2/\",\n" +
        "      \"error\": \"NO AUTHOR/CLASS-INFO\"\n" +
        "    },\n" +
        "    {\n" +
        "      \"siteUrl\": \"http://ca2-ebski.rhcloud.com/CA2New/\",\n" +
        "      \"authors\": \"Ebbe, Kasper, Christoffer\",\n" +
        "      \"class\": \"A klassen\",\n" +
        "      \"group\": \"Gruppe: Johns Llama Herders A/S\"\n" +
        "    },\n" +
        "    {\n" +
        "      \"siteUrl\": \"http://ca2-chrislind.rhcloud.com/CA2Final/\",\n" +
        "      \"error\": \"NO AUTHOR/CLASS-INFO\"\n" +
        "    },\n" +
        "    {\n" +
        "      \"siteUrl\": \"http://ca2-pernille.rhcloud.com/NYCA2/\",\n" +
        "      \"authors\": \"Marta, Jeanette, Pernille\",\n" +
        "      \"class\": \"DAT A\",\n" +
        "      \"group\": \"Group: MJP\"\n" +
        "    },\n" +
        "    {\n" +
        "      \"siteUrl\": \"https://ca2-afn.rhcloud.com:8443/company.jsp\",\n" +
        "      \"error\": \"NO AUTHOR/CLASS-INFO\"\n" +
        "    },\n" +
        "    {\n" +
        "      \"siteUrl\": \"http://ca-smcphbusiness.rhcloud.com/ca2/index.jsp\",\n" +
        "      \"authors\": \"Mikkel, Steffen, B Andersen\",\n" +
        "      \"class\": \"A Class Computer Science\",\n" +
        "      \"group\": \"1\"\n" +
        "    }\n" +
        "  ]\n" +
        "}";
    // When
    JsonPath jsonPath = new JsonPath(json);
    // Then
    assertThat(jsonPath.getList("groups.getAt('class')", String.class), hasItems("A klassen"));
}
/**
 * Returns the ObjectFactory class configured via the object-factory property,
 * or {@code null} when the property is absent.
 */
@Override
public Class<? extends ObjectFactory> getObjectFactoryClass() {
    return configurationParameters
        // Parse the property value into a Class via ObjectFactoryParser.
        .get(OBJECT_FACTORY_PROPERTY_NAME, ObjectFactoryParser::parseObjectFactory)
        .orElse(null);
}
// Configured object-factory property should resolve to the named class.
@Test
void objectFactory() {
    ConfigurationParameters configurationParameters = new MapConfigurationParameters(
        Constants.OBJECT_FACTORY_PROPERTY_NAME, DefaultObjectFactory.class.getName());
    assertThat(new CucumberEngineOptions(configurationParameters).getObjectFactoryClass(),
        is(DefaultObjectFactory.class));
}
/**
 * Returns the parameter rewriters applicable to the current statement.
 * addParameterRewriter is expected to filter out rewriters that do not apply,
 * so the result may contain fewer entries than are offered here.
 */
@Override
public Collection<ParameterRewriter> getParameterRewriters() {
    Collection<ParameterRewriter> result = new LinkedList<>();
    addParameterRewriter(result, new GeneratedKeyInsertValueParameterRewriter());
    addParameterRewriter(result, new ShardingPaginationParameterRewriter());
    return result;
}
// With pagination present on a SELECT, only the pagination rewriter should be returned.
@Test
void assertGetParameterRewritersWhenPaginationIsNeedRewrite() {
    SelectStatementContext statementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
    when(statementContext.getPaginationContext().isHasPagination()).thenReturn(true);
    Collection<ParameterRewriter> actual = new ShardingParameterRewriterBuilder(
        mock(RouteContext.class),
        Collections.singletonMap("test", mock(ShardingSphereSchema.class)),
        statementContext).getParameterRewriters();
    assertThat(actual.size(), is(1));
    assertThat(actual.iterator().next(), instanceOf(ShardingPaginationParameterRewriter.class));
}
/**
 * Runs the callable synchronously on the caller's thread and returns an
 * already-completed future holding either its result or the thrown exception.
 *
 * @throws RejectedExecutionException if this executor has been shut down
 */
@Override
@Nonnull
public <T> Future<T> submit(@Nonnull Callable<T> task) {
    throwRejectedExecutionExceptionIfShutdown();
    T value = null;
    Exception failure = null;
    try {
        value = task.call();
    } catch (Exception e) {
        failure = e;
    }
    // Exactly one of (value, failure) is populated; both null is possible only
    // when the callable legitimately returned null.
    return new CompletedFuture<>(value, failure);
}
// The direct executor must run the callable on the submitting thread itself.
@Test
void testSubmitCallable() {
    final CompletableFuture<Thread> future = new CompletableFuture<>();
    testTaskSubmissionBeforeShutdown(
        testInstance -> testInstance.submit(callableFromFuture(future)));
    // Execution happened synchronously on the current thread.
    assertThat(future).isCompletedWithValue(Thread.currentThread());
}
public static Object convertAvroFormat( FieldType beamFieldType, Object avroValue, BigQueryUtils.ConversionOptions options) { TypeName beamFieldTypeName = beamFieldType.getTypeName(); if (avroValue == null) { if (beamFieldType.getNullable()) { return null; } else { throw new IllegalArgumentException(String.format("Field %s not nullable", beamFieldType)); } } switch (beamFieldTypeName) { case BYTE: case INT16: case INT32: case INT64: case FLOAT: case DOUBLE: case STRING: case BYTES: case BOOLEAN: return convertAvroPrimitiveTypes(beamFieldTypeName, avroValue); case DATETIME: // Expecting value in microseconds. switch (options.getTruncateTimestamps()) { case TRUNCATE: return truncateToMillis(avroValue); case REJECT: return safeToMillis(avroValue); default: throw new IllegalArgumentException( String.format( "Unknown timestamp truncation option: %s", options.getTruncateTimestamps())); } case DECIMAL: return convertAvroNumeric(avroValue); case ARRAY: return convertAvroArray(beamFieldType, avroValue, options); case LOGICAL_TYPE: LogicalType<?, ?> logicalType = beamFieldType.getLogicalType(); assert logicalType != null; String identifier = logicalType.getIdentifier(); if (SqlTypes.DATE.getIdentifier().equals(identifier)) { return convertAvroDate(avroValue); } else if (SqlTypes.TIME.getIdentifier().equals(identifier)) { return convertAvroTime(avroValue); } else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) { return convertAvroDateTime(avroValue); } else if (SQL_DATE_TIME_TYPES.contains(identifier)) { switch (options.getTruncateTimestamps()) { case TRUNCATE: return truncateToMillis(avroValue); case REJECT: return safeToMillis(avroValue); default: throw new IllegalArgumentException( String.format( "Unknown timestamp truncation option: %s", options.getTruncateTimestamps())); } } else if (logicalType instanceof PassThroughLogicalType) { return convertAvroFormat(logicalType.getBaseType(), avroValue, options); } else { throw new RuntimeException("Unknown logical type 
" + identifier); } case ROW: Schema rowSchema = beamFieldType.getRowSchema(); if (rowSchema == null) { throw new IllegalArgumentException("Nested ROW missing row schema"); } GenericData.Record record = (GenericData.Record) avroValue; return toBeamRow(record, rowSchema, options); case MAP: return convertAvroRecordToMap(beamFieldType, avroValue, options); default: throw new RuntimeException( "Does not support converting unknown type value: " + beamFieldTypeName); } }
// BYTES conversion should unwrap a ByteBuffer into the raw byte[] content.
@Test
public void testBytesType() {
    byte[] bytes = "hello".getBytes(StandardCharsets.UTF_8);
    assertThat(
        BigQueryUtils.convertAvroFormat(FieldType.BYTES, ByteBuffer.wrap(bytes), REJECT_OPTIONS),
        equalTo(bytes));
}
/**
 * Checks whether the given UFS path refers to an existing regular file.
 * The scheme/authority prefix is stripped before consulting the local filesystem.
 */
@Override
public boolean isFile(String path) throws IOException {
    return new File(stripPath(path)).isFile();
}
// A directory is not a file; a freshly created file is.
@Test
public void isFile() throws IOException {
    String dirpath = PathUtils.concatPath(mLocalUfsRoot, getUniqueFileName());
    mLocalUfs.mkdirs(dirpath);
    assertFalse(mLocalUfs.isFile(dirpath));
    String filepath = PathUtils.concatPath(mLocalUfsRoot, getUniqueFileName());
    mLocalUfs.create(filepath).close();
    assertTrue(mLocalUfs.isFile(filepath));
}
/**
 * Rate-limit gate: returns true while still inside the backoff window.
 * When the window has elapsed, the next deadline is advanced by the backoff
 * coefficient and false is returned (i.e. the caller may proceed).
 */
public boolean isTooSoon() {
    final long now = getCurrentTime();
    if (now <= next) {
        // Still within the current window — caller should hold off.
        return true;
    }
    next = now + getBackoffCoefficient();
    return false;
}
// Immediately after construction the coordinator reports "too soon",
// i.e. no recovery attempt should be made yet.
@Test
public void recoveryNotNeededAfterInit() {
    RecoveryCoordinator rc = new RecoveryCoordinator();
    assertTrue(rc.isTooSoon());
}
/** Returns this analyzer's display name. */
@Override
public String getName() {
    return ANALYZER_NAME;
}
// The analyzer must advertise its expected display name.
@Test
public void testGetName() {
    assertThat(analyzer.getName(), is("Ruby Gemspec Analyzer"));
}
/**
 * This collection is unmodifiable: element removal is not supported.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public boolean remove(Object o) {
    throw new UnsupportedOperationException();
}
// Removing from the unmodifiable set must fail fast.
@Test(expected = UnsupportedOperationException.class)
public void test_removeInteger() {
    set.remove(Integer.valueOf(3));
}
/**
 * Persists the job inside a transaction and notifies job-stats listeners after commit.
 *
 * @return the saved job as returned by the table mapper
 * @throws StorageException wrapping any SQLException from the datasource
 */
@Override
public Job save(Job jobToSave) {
    // Connection and Transaction are both closed by try-with-resources;
    // without an explicit commit the transaction rolls back on close.
    try (final Connection conn = dataSource.getConnection(); final Transaction transaction = new Transaction(conn)) {
        final Job savedJob = jobTable(conn).save(jobToSave);
        transaction.commit();
        // Listeners are notified only after a successful commit.
        notifyJobStatsOnChangeListeners();
        return savedJob;
    } catch (SQLException e) {
        throw new StorageException(e);
    }
}
// Batch save of a single enqueued job should complete without throwing.
@Test
void saveJobs() throws SQLException {
    when(preparedStatement.executeBatch()).thenReturn(new int[]{1});
    assertThatCode(() -> jobStorageProvider.save(singletonList(anEnqueuedJob().build()))).doesNotThrowAnyException();
}
/**
 * Resolves a name via the delegate resolver while tracking how long resolution takes.
 * The start time is recorded in the tracker before delegation and the elapsed duration
 * is measured and logged in a finally block, so timing is captured even when the
 * delegate throws.
 */
public InetAddress resolve(final String name, final String uriParamName, final boolean isReResolution) {
    final long beginNs = clock.nanoTime();
    maxTimeTracker.update(beginNs);
    // address stays null if the delegate throws; the finally block still logs it.
    InetAddress address = null;
    try {
        address = delegateResolver.resolve(name, uriParamName, isReResolution);
        return address;
    } finally {
        final long endNs = clock.nanoTime();
        maxTimeTracker.measureAndUpdate(endNs);
        logResolve(delegateResolver.getClass().getSimpleName(), endNs - beginNs, name, isReResolution, address);
    }
}
// Verifies both the result and the exact ordering of tracker interactions:
// clock read -> update(begin) -> delegate -> clock read -> measureAndUpdate(end).
@Test
void resolveShouldMeasureExecutionTime() {
    final NameResolver delegateResolver = mock(NameResolver.class);
    when(delegateResolver.resolve(anyString(), anyString(), anyBoolean()))
        .thenAnswer(invocation -> InetAddress.getByName(invocation.getArgument(0)));
    final NanoClock clock = mock(NanoClock.class);
    final long beginNs = SECONDS.toNanos(1);
    final long endNs = SECONDS.toNanos(9);
    when(clock.nanoTime()).thenReturn(beginNs, endNs);
    final DutyCycleTracker maxTime = mock(DutyCycleTracker.class);
    final TimeTrackingNameResolver resolver = new TimeTrackingNameResolver(delegateResolver, clock, maxTime);
    final String name = "localhost";
    final String endpoint = "endpoint";
    final boolean isReLookup = true;
    assertEquals(InetAddress.getLoopbackAddress(), resolver.resolve(name, endpoint, isReLookup));
    final InOrder inOrder = inOrder(delegateResolver, clock, maxTime);
    inOrder.verify(clock).nanoTime();
    inOrder.verify(maxTime).update(beginNs);
    inOrder.verify(delegateResolver).resolve(name, endpoint, isReLookup);
    inOrder.verify(clock).nanoTime();
    inOrder.verify(maxTime).measureAndUpdate(endNs);
    inOrder.verifyNoMoreInteractions();
}
/** Returns the type information of the elements this source/function produces. */
@Override
public TypeInformation<T> getProducedType() {
    return type;
}
// Type extraction must resolve the generic parameter through an indirect subclass chain.
@Test
void testIndirectGenericExtension() {
    TypeInformation<String> type = new IndirectExtension().getProducedType();
    assertThat(type).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
}
/**
 * Deserializes JSON bytes from a Kafka topic into the target type, coercing the parsed
 * tree to the expected schema. Returns null for null payloads (tombstones).
 *
 * @throws SerializationException on any parse/coercion failure, with parser location
 *         cleared so raw payload data is never leaked into logs
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }
        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
            ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
            : MAPPER.readTree(bytes);
        // Coerce the whole tree ("$" = root path) to the declared schema.
        final Object coerced = enforceFieldType(
            "$",
            new JsonValueContext(value, schema)
        );
        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }
        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
            "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
// A boolean JSON value must not coerce to a DATE column; the error should name both types.
@Test
public void shouldThrowIfCanNotCoerceToDate() {
    // Given:
    final KsqlJsonDeserializer<java.sql.Date> deserializer =
        givenDeserializerForSchema(Date.SCHEMA, java.sql.Date.class);
    final byte[] bytes = serializeJson(BooleanNode.valueOf(true));
    // When:
    final Exception e = assertThrows(
        SerializationException.class,
        () -> deserializer.deserialize(SOME_TOPIC, bytes)
    );
    // Then:
    assertThat(e.getCause(), (hasMessage(startsWith(
        "Can't convert type. sourceType: BooleanNode, requiredType: DATE"))));
}
/**
 * Removes the mapping for {@code key} from this open-addressing hash map.
 * Probing is linear; a slot holding {@code initialValue} marks an empty slot and
 * terminates the search. After removal the probe chain is compacted so later
 * lookups do not stop early at the freed slot.
 *
 * @return the previous value, or {@code initialValue} if the key was absent
 */
public int remove(final K key) {
    final int initialValue = this.initialValue;
    final K[] keys = this.keys;
    final int[] values = this.values;
    // Capacity is a power of two, so (hash & mask) == (hash % capacity).
    @DoNotSub final int mask = values.length - 1;
    @DoNotSub int index = Hashing.hash(key, mask);
    int oldValue = initialValue;
    // Scan the probe chain until an empty slot (== initialValue) is hit.
    while (initialValue != values[index]) {
        if (Objects.equals(keys[index], key)) {
            oldValue = values[index];
            values[index] = initialValue;
            size--;
            // Re-seat any entries displaced past this slot by earlier collisions.
            compactChain(index);
            break;
        }
        index = ++index & mask;
    }
    return oldValue;
}
// Removing an absent key must return the sentinel initial value.
@Test
void removeShouldReturnMissing() {
    assertEquals(INITIAL_VALUE, map.remove(1));
}
/** Returns the wire-protocol type code identifying a heartbeat message. */
@Override
public short getTypeCode() {
    return MessageType.TYPE_HEARTBEAT_MSG;
}
// Both PING and PONG instances share the heartbeat type code.
@Test
void getTypeCode() {
    Assertions.assertEquals(MessageType.TYPE_HEARTBEAT_MSG, HeartbeatMessage.PING.getTypeCode());
    Assertions.assertEquals(MessageType.TYPE_HEARTBEAT_MSG, HeartbeatMessage.PONG.getTypeCode());
}
/**
 * Checks whether the current user may scan the project, based on the DevOps platform's
 * repository permissions: first directly, then via group (team) membership.
 * NOTE(review): fullName() is assumed to be "org/repo" — a malformed name would throw
 * ArrayIndexOutOfBoundsException here; confirm upstream validation.
 */
@Override
public boolean isScanAllowedUsingPermissionsFromDevopsPlatform() {
    checkState(authAppInstallationToken != null, "An auth app token is required in case repository permissions checking is necessary.");
    String[] orgaAndRepoTokenified = devOpsProjectCreationContext.fullName().split("/");
    String organization = orgaAndRepoTokenified[0];
    String repository = orgaAndRepoTokenified[1];
    // FIX: the DbSession opened here was never closed, leaking a pooled connection on
    // every call. try-with-resources guarantees it is released.
    Set<DevOpsPermissionsMappingDto> permissionsMappingDtos;
    try (var dbSession = dbClient.openSession(false)) {
        permissionsMappingDtos = dbClient.githubPermissionsMappingDao()
            .findAll(dbSession, devOpsPlatformSettings.getDevOpsPlatform());
    }
    boolean userHasDirectAccessToRepo = doesUserHaveScanPermission(organization, repository, permissionsMappingDtos);
    if (userHasDirectAccessToRepo) {
        return true;
    }
    return doesUserBelongToAGroupWithScanPermission(organization, repository, permissionsMappingDtos);
}
// The user belongs only to team1 (no scan permission); team2 grants scan but the user
// is not a member, so scanning must be denied.
@Test
void isScanAllowedUsingPermissionsFromDevopsPlatform_whenAccessViaTeamButUserNotInTeam_returnsFalse() {
    GsonRepositoryTeam team1 = mockGithubTeam("team1", 1, "role1", "read", "another_perm");
    GsonRepositoryTeam team2 = mockGithubTeam("team2", 2, "role2", "another_perm", UserRole.SCAN);
    mockTeamsFromApi(team1, team2);
    bindGroupsToUser(team1.name());
    assertThat(githubProjectCreator.isScanAllowedUsingPermissionsFromDevopsPlatform()).isFalse();
}
/**
 * Louvain-style modularity optimization: repeatedly moves nodes to their best
 * community until no local improvement remains, then zooms out (collapses communities)
 * and repeats. Fills {@code comStructure} with the final community of each node.
 *
 * @return map with keys "modularity" (resolution 1.0) and "modularityResolution"
 *         (at {@code currentResolution}); empty if the computation was cancelled
 */
protected HashMap<String, Double> computeModularity(Graph graph, CommunityStructure theStructure, int[] comStructure,
        double currentResolution, boolean randomized, boolean weighted) {
    isCanceled = false;
    Progress.start(progress);
    Random rand = new Random();
    double totalWeight = theStructure.graphWeightSum;
    double[] nodeDegrees = theStructure.weights.clone();
    HashMap<String, Double> results = new HashMap<>();
    if (isCanceled) {
        return results;
    }
    boolean someChange = true;
    while (someChange) {
        someChange = false;
        boolean localChange = true;
        while (localChange) {
            localChange = false;
            int start = 0;
            if (randomized) {
                // FIX: previously Math.abs(rand.nextInt()) % N — Math.abs(Integer.MIN_VALUE)
                // is negative, which could yield a negative start index. nextInt(bound)
                // returns a uniform value in [0, N) directly.
                start = rand.nextInt(theStructure.N);
            }
            int step = 0;
            // Visit every node once, in a (possibly randomized) circular order.
            for (int i = start; step < theStructure.N; i = (i + 1) % theStructure.N) {
                step++;
                Community bestCommunity = updateBestCommunity(theStructure, i, currentResolution);
                if ((theStructure.nodeCommunities[i] != bestCommunity) && (bestCommunity != null)) {
                    theStructure.moveNodeTo(i, bestCommunity);
                    localChange = true;
                }
                if (isCanceled) {
                    return results;
                }
            }
            someChange = localChange || someChange;
            if (isCanceled) {
                return results;
            }
        }
        if (someChange) {
            // Collapse communities into super-nodes and optimize at the coarser level.
            theStructure.zoomOut();
        }
    }
    fillComStructure(graph, theStructure, comStructure);
    double[] degreeCount = fillDegreeCount(graph, theStructure, comStructure, nodeDegrees, weighted);
    double computedModularity = finalQ(comStructure, degreeCount, graph, theStructure, totalWeight, 1., weighted);
    double computedModularityResolution = finalQ(comStructure, degreeCount, graph, theStructure, totalWeight, currentResolution, weighted);
    results.put("modularity", computedModularity);
    results.put("modularityResolution", computedModularityResolution);
    return results;
}
// Builds a barbell graph (two K4 cliques joined by a single bridge edge) and checks
// that the two clique endpoints land in different communities unless modularity is 0.
@Test
public void testComputeBarbellGraphModularityNormalResolution() {
    GraphModel graphModel = GraphGenerator.generateCompleteUndirectedGraph(4);
    UndirectedGraph undirectedGraph = graphModel.getUndirectedGraph();
    // Add a second complete K4 (nodes "4".."7").
    Node[] nodes = new Node[4];
    for (int i = 0; i < 4; i++) {
        Node currentNode = graphModel.factory().newNode(((Integer) (i + 4)).toString());
        nodes[i] = currentNode;
        undirectedGraph.addNode(currentNode);
    }
    for (int i = 0; i < 3; i++) {
        for (int j = i + 1; j < 4; j++) {
            Edge currentEdge = graphModel.factory().newEdge(nodes[i], nodes[j], false);
            undirectedGraph.addEdge(currentEdge);
        }
    }
    // Bridge edge connecting the two cliques.
    Edge currentEdge = graphModel.factory().newEdge(undirectedGraph.getNode("0"), undirectedGraph.getNode("5"), false);
    undirectedGraph.addEdge(currentEdge);
    UndirectedGraph graph = graphModel.getUndirectedGraph();
    Modularity mod = new Modularity();
    Modularity.CommunityStructure theStructure = mod.new CommunityStructure(graph);
    int[] comStructure = new int[graph.getNodeCount()];
    HashMap<String, Double> modularityValues = mod.computeModularity(graph, theStructure, comStructure,
        1., true, false);
    double modValue = modularityValues.get("modularity");
    int class4 = comStructure[0];
    int class5 = comStructure[5];
    boolean correctResult = (class4 != class5 || modValue == 0.);
    assertTrue(correctResult);
}
/**
 * Adds JVM options parsed from a mandatory property. The value is split on spaces that
 * precede a dash (lookahead regex), so options containing embedded spaces survive intact.
 *
 * @return this builder, for chaining
 * @throws IllegalArgumentException if the property is missing (via nonNullValue),
 *         malformed, or overwrites a mandatory option
 */
public T addFromMandatoryProperty(Props props, String propertyName) {
    String value = props.nonNullValue(propertyName);
    if (!value.isEmpty()) {
        // Split on a space only when the next token starts with '-': " (?=-)".
        String splitRegex = " (?=-)";
        List<String> jvmOptions = Arrays.stream(value.split(splitRegex)).map(String::trim).toList();
        checkOptionFormat(propertyName, jvmOptions);
        checkMandatoryOptionOverwrite(propertyName, jvmOptions);
        options.addAll(jvmOptions);
    }
    return castThis();
}
// A missing mandatory property must raise an IllegalArgumentException naming the property.
@Test
public void addFromMandatoryProperty_fails_with_IAE_if_property_does_not_exist() {
    expectMissingPropertyIAE(() -> underTest.addFromMandatoryProperty(new Props(properties), this.randomPropertyName),
        this.randomPropertyName);
}
/** Returns true when this PathSpec contains no path components. */
public boolean isEmptyPath() {
    return _path.isEmpty();
}
// Data-driven: each provided PathSpec is paired with its expected emptiness flag.
@Test(dataProvider = "pathSpecsWithEmptyFlag")
public void testIsEmptyPath(PathSpec testPathSpec, boolean expectedResponse) {
    Assert.assertEquals(testPathSpec.isEmptyPath(), expectedResponse);
}
/**
 * Formats a flow count for display: "" for counts below 1, otherwise the number
 * followed by the singular or plural unit label.
 */
public static String formatFlows(long flows) {
    if (flows <= 0) {
        return EMPTY;
    }
    String unit = (flows == 1) ? FLOW : FLOWS;
    return flows + SPACE + unit;
}
// Negative counts are formatted as the empty string.
@Test
public void formatNegativeFlows() {
    String f = TopoUtils.formatFlows(-3);
    assertEquals(AM_WL, "", f);
}
/**
 * Places replicas for every partition of the requested topic across racks,
 * validating the replication factor against the available/unfenced broker counts.
 *
 * @throws InvalidReplicationFactorException if numReplicas is non-positive, no broker
 *         is unfenced, or there are fewer brokers than requested replicas
 */
@Override
public TopicAssignment place(
    PlacementSpec placement,
    ClusterDescriber cluster
) throws InvalidReplicationFactorException {
    RackList rackList = new RackList(random, cluster.usableBrokers());
    throwInvalidReplicationFactorIfNonPositive(placement.numReplicas());
    throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers());
    throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(), rackList.numTotalBrokers());
    List<List<Integer>> placements = new ArrayList<>(placement.numPartitions());
    for (int partition = 0; partition < placement.numPartitions(); partition++) {
        placements.add(rackList.place(placement.numReplicas()));
    }
    return new TopicAssignment(
        placements.stream().map(replicas -> new PartitionAssignment(replicas, cluster)).collect(Collectors.toList())
    );
}
// With fenced and unfenced brokers mixed across racks, RackList should report the
// correct totals and deterministically rotate placements given the MockRandom seed.
@Test
public void testRackListWithInvalidRacks() {
    MockRandom random = new MockRandom();
    RackList rackList = new RackList(random, Arrays.asList(
        new UsableBroker(11, Optional.of("1"), false),
        new UsableBroker(10, Optional.of("1"), false),
        new UsableBroker(30, Optional.of("3"), true),
        new UsableBroker(31, Optional.of("3"), true),
        new UsableBroker(20, Optional.of("2"), true),
        new UsableBroker(21, Optional.of("2"), true),
        new UsableBroker(41, Optional.of("4"), false),
        new UsableBroker(40, Optional.of("4"), true)).iterator());
    assertEquals(8, rackList.numTotalBrokers());
    assertEquals(3, rackList.numUnfencedBrokers());
    assertEquals(Arrays.asList(Optional.of("1"), Optional.of("2"),
        Optional.of("3"), Optional.of("4")), rackList.rackNames());
    assertEquals(Arrays.asList(41, 11, 21, 30), rackList.place(4));
    assertEquals(Arrays.asList(10, 20, 31, 41), rackList.place(4));
    assertEquals(Arrays.asList(41, 21, 30, 11), rackList.place(4));
}
/** Returns the Hadoop configuration backing this object. */
public Configuration getConf() {
    return this.conf;
}
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
    final String fullHarPathStr = makeArchive();
    // make path to copy the file to:
    final String tmpDir = System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
    final Path tmpPath = new Path(tmpDir);
    final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    localFs.delete(tmpPath, true);
    localFs.mkdirs(tmpPath);
    assertTrue(localFs.exists(tmpPath));
    // Create fresh HarFs:
    final HarFileSystem harFileSystem = new HarFileSystem(fs);
    try {
        final URI harUri = new URI(fullHarPathStr);
        harFileSystem.initialize(harUri, fs.getConf());
        final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
        final Path targetPath = new Path(tmpPath, "straus");
        // copy the Har file to a local file system:
        harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
        FileStatus straus = localFs.getFileStatus(targetPath);
        // the file should contain just 1 character:
        assertEquals(1, straus.getLen());
    } finally {
        // Clean up both the HarFileSystem and the temporary local directory.
        harFileSystem.close();
        localFs.delete(tmpPath, true);
    }
}
/**
 * Creates a retrying dynamic proxy for {@code implementation} with a single-target
 * (non-failover) proxy provider and the given retry policy.
 */
public static <T> Object create(Class<T> iface, T implementation, RetryPolicy retryPolicy) {
    return RetryProxy.create(iface,
        new DefaultFailoverProxyProvider<T>(iface, implementation),
        retryPolicy);
}
// With 8 retries allowed: methods failing <= 8 times succeed; failing 10 times exhausts
// the retry budget and the original exception propagates.
@Test
public void testRetryUpToMaximumCountWithProportionalSleep() throws UnreliableException {
    UnreliableInterface unreliable = (UnreliableInterface)
        RetryProxy.create(UnreliableInterface.class, unreliableImpl,
            retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
    unreliable.alwaysSucceeds();
    unreliable.failsOnceThenSucceeds();
    try {
        unreliable.failsTenTimesThenSucceeds();
        fail("Should fail");
    } catch (UnreliableException e) {
        // expected
    }
}
/**
 * Validates a table name against length bounds and the illegal-character pattern
 * (periods and forward slashes are rejected).
 *
 * @throws IllegalArgumentException if the name is too short, too long, or contains
 *         an illegal character
 */
static void checkValidTableName(String nameToCheck) {
    // NOTE(review): message says "empty" but the real constraint is length < MIN_TABLE_ID_LENGTH —
    // confirm MIN_TABLE_ID_LENGTH is 1, otherwise the message understates the rule.
    if (nameToCheck.length() < MIN_TABLE_ID_LENGTH) {
        throw new IllegalArgumentException("Table name cannot be empty. ");
    }
    if (nameToCheck.length() > MAX_TABLE_ID_LENGTH) {
        throw new IllegalArgumentException(
            "Table name " + nameToCheck + " cannot be longer than " + MAX_TABLE_ID_LENGTH + " characters.");
    }
    if (ILLEGAL_TABLE_CHARS.matcher(nameToCheck).find()) {
        throw new IllegalArgumentException(
            "Table name " + nameToCheck + " is not a valid name. Periods and forward slashes are not allowed.");
    }
}
// Spaces, dashes, underscores, digits and '!' are all legal; only length bounds,
// periods and slashes are rejected.
@Test
public void testCheckValidTableNameDoesNotThrowErrorWhenNameIsValid() {
    checkValidTableName("A-l3gal_t4ble NAME!");
}
/**
 * Deletes a code-generation table definition and all its column definitions
 * in one transaction.
 *
 * @throws ServiceException CODEGEN_TABLE_NOT_EXISTS if the table id is unknown
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteCodegen(Long tableId) {
    // 校验是否已经存在 (verify the table definition exists)
    if (codegenTableMapper.selectById(tableId) == null) {
        throw exception(CODEGEN_TABLE_NOT_EXISTS);
    }
    // 删除 table 表定义 (delete the table definition)
    codegenTableMapper.deleteById(tableId);
    // 删除 column 字段定义 (delete its column definitions)
    codegenColumnMapper.deleteListByTableId(tableId);
}
// Deleting a codegen table must also cascade-delete its column definitions.
@Test
public void testDeleteCodegen_success() {
    // mock data: one table with one column
    CodegenTableDO table = randomPojo(CodegenTableDO.class,
        o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()));
    codegenTableMapper.insert(table);
    CodegenColumnDO column = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()));
    codegenColumnMapper.insert(column);
    // prepare argument
    Long tableId = table.getId();
    // call
    codegenService.deleteCodegen(tableId);
    // assert: both table and columns are gone
    assertNull(codegenTableMapper.selectById(tableId));
    assertEquals(0, codegenColumnMapper.selectList().size());
}
/**
 * Coerces a JSON node to an int: numeric nodes directly, text nodes via parsing.
 *
 * @throws KsqlException-style coercion error if a text node is not a valid integer,
 *         or the node is neither numeric nor text
 */
static int toInteger(final JsonNode object) {
    if (object instanceof NumericNode) {
        return object.intValue();
    }
    if (!(object instanceof TextNode)) {
        throw invalidConversionException(object, SqlBaseType.INTEGER);
    }
    try {
        return Integer.parseInt(object.textValue());
    } catch (final NumberFormatException e) {
        throw failedStringCoercionException(SqlBaseType.INTEGER);
    }
}
// A numeric node converts straight to its int value.
@Test
public void shouldConvertToIntCorrectly() {
    final Integer i = JsonSerdeUtils.toInteger(JsonNodeFactory.instance.numberNode(1));
    assertThat(i, equalTo(1));
}
/**
 * Appends the records from a controller result via the supplied appender.
 * Atomic results are written as one batch (bounded by maxRecordsPerBatch);
 * non-atomic results are split into batches of at most maxRecordsPerBatch.
 *
 * @return the log offset of the final appended batch
 * @throws IllegalStateException if an atomic result exceeds maxRecordsPerBatch
 */
static long appendRecords(
    Logger log,
    ControllerResult<?> result,
    int maxRecordsPerBatch,
    Function<List<ApiMessageAndVersion>, Long> appender
) {
    try {
        List<ApiMessageAndVersion> records = result.records();
        if (result.isAtomic()) {
            // If the result must be written out atomically, check that it is not too large.
            // In general, we create atomic batches when it is important to commit "all, or
            // nothing". They are limited in size and must only be used when the batch size
            // is bounded.
            if (records.size() > maxRecordsPerBatch) {
                throw new IllegalStateException("Attempted to atomically commit " +
                    records.size() + " records, but maxRecordsPerBatch is " +
                    maxRecordsPerBatch);
            }
            long offset = appender.apply(records);
            if (log.isTraceEnabled()) {
                log.trace("Atomically appended {} record(s) ending with offset {}.",
                    records.size(), offset);
            }
            return offset;
        } else {
            // If the result is non-atomic, then split it into as many batches as needed.
            // The appender callback will create an in-memory snapshot for each batch,
            // since we might need to revert to any of them. We will only return the final
            // offset of the last batch, however.
            int startIndex = 0, numBatches = 0;
            while (true) {
                numBatches++;
                int endIndex = startIndex + maxRecordsPerBatch;
                if (endIndex > records.size()) {
                    // Final (possibly short) batch: append the remainder and return its offset.
                    long offset = appender.apply(records.subList(startIndex, records.size()));
                    if (log.isTraceEnabled()) {
                        log.trace("Appended {} record(s) in {} batch(es), ending with offset {}.",
                            records.size(), numBatches, offset);
                    }
                    return offset;
                } else {
                    appender.apply(records.subList(startIndex, endIndex));
                }
                startIndex += maxRecordsPerBatch;
            }
        }
    } catch (ApiException e) {
        // If the Raft client throws a subclass of ApiException, we need to convert it into a
        // RuntimeException so that it will be handled as the unexpected exception that it is.
        // ApiExceptions are reserved for expected errors such as incorrect uses of controller
        // APIs, permission errors, NotControllerException, etc.
        throw new RuntimeException(e);
    }
}
// An atomic result larger than maxRecordsPerBatch must be rejected, not split.
@Test
public void testAppendRecordsAtomically() {
    TestAppender appender = new TestAppender();
    assertEquals("Attempted to atomically commit 5 records, but maxRecordsPerBatch is 2",
        assertThrows(IllegalStateException.class, () ->
            QuorumController.appendRecords(log,
                ControllerResult.atomicOf(Arrays.asList(rec(0), rec(1), rec(2), rec(3), rec(4)), null),
                2,
                appender)).getMessage());
}
public static <Key extends Comparable, Value, ListType extends List<Value>> MultiMap<Key, Value, ListType> make(final boolean updatable, final NewSubMapProvider<Value, ListType> newSubMapProvider) { if (updatable) { return new ChangeHandledMultiMap<>(new RawMultiMap<>(newSubMapProvider)); } else { return new RawMultiMap<>(newSubMapProvider); } }
@Test void testDefault() throws Exception { assertThat(MultiMapFactory.make() instanceof ChangeHandledMultiMap).isFalse(); }
public static FactoryBuilder newFactoryBuilder(Propagation.Factory delegate) { return new FactoryBuilder(delegate); }
@Test void dupesNotOk() { SingleBaggageField userIdConfig = SingleBaggageField.local(BaggageField.create("userId")); BaggagePropagation.FactoryBuilder builder = newFactoryBuilder(B3Propagation.FACTORY) .add(userIdConfig); assertThatThrownBy(() -> builder.add(userIdConfig)) .isInstanceOf(IllegalArgumentException.class); }
public static String getDeprecatedMetricsInSonarQube104() { return "'" + String.join("', '","bugs", "new_bugs", "vulnerabilities", "new_vulnerabilities", "code_smells", "new_code_smells", "high_impact_accepted_issues") + "'"; }
@Test public void getDeprecatedMetricsInSonarQube104_shouldReturnExactString() { String actual = MeasuresWsModule.getDeprecatedMetricsInSonarQube104(); assertThat(actual).isEqualTo("'bugs', 'new_bugs', 'vulnerabilities', 'new_vulnerabilities', 'code_smells', 'new_code_smells', " + "'high_impact_accepted_issues'"); }
@Override public Local create(final Path file) { return this.create(new UUIDRandomStringService().random(), file); }
@Test public void testPathNotTooLong() { final String temp = StringUtils.removeEnd(System.getProperty("java.io.tmpdir"), File.separator); final String testPathDirectory = "/Lorem/ipsum/dolor/sit/amet/consetetur/sadipscing/elitr/sed/diam/nonumy/eirmod/tempor"; final String testPathFile = "takimata.sanc"; final String testPath = String.format("%s/%s", testPathDirectory, testPathFile); final String testPathMD5 = DigestUtils.md5Hex(testPathDirectory); Path file = new Path(testPath, EnumSet.of(Path.Type.file)); file.attributes().setVersionId("2"); final Local local = new DefaultTemporaryFileService().create("UID", file); final String localFile = local.getAbsolute(); assertEquals(String.format("%s/%s%s/1744299885/%s", temp, "UID", testPathDirectory, testPathFile).replace('/', File.separatorChar), localFile); assertNotEquals(String.format("%s/%s%s/1744299885/%s", temp, "UID", testPathMD5, testPathFile).replace('/', File.separatorChar), localFile); }
@Override public <S, C extends Config<S>> C addConfig(S subject, Class<C> configClass) { checkPermission(CONFIG_WRITE); checkNotNull(subject, NULL_SUBJECT_MSG); checkNotNull(configClass, NULL_CCLASS_MSG); return store.createConfig(subject, configClass); }
@Test public void testAddConfig() { assertThat(configService.getSubjectFactory(String.class), nullValue()); assertThat(configService.getSubjectFactory("key"), nullValue()); registry.registerConfigFactory(config1Factory); registry.registerConfigFactory(config2Factory); configService.addConfig("configKey", BasicConfig1.class); Config newConfig = configService.getConfig("configKey", BasicConfig1.class); assertThat(newConfig, notNullValue()); assertThat(configService.getSubjectFactory(String.class), notNullValue()); assertThat(configService.getSubjectFactory("key1"), notNullValue()); Set<Class> classes = configService.getSubjectClasses(); assertThat(classes, hasSize(1)); Set<String> subjectsForClass = configService.getSubjects(String.class); assertThat(subjectsForClass, hasSize(1)); Set<String> subjectsForConfig = configService.getSubjects(String.class, BasicConfig1.class); assertThat(subjectsForConfig, hasSize(1)); Class queriedConfigClass = configService.getConfigClass("key1", "config1"); assertThat(queriedConfigClass == BasicConfig1.class, is(true)); Set<? extends Config> configs = configService.getConfigs("configKey"); assertThat(configs.size(), is(1)); configs.forEach(c -> assertThat(c, instanceOf(BasicConfig1.class))); configService.removeConfig("configKey", BasicConfig1.class); Config newConfigAfterRemove = configService.getConfig("configKey", BasicConfig1.class); assertThat(newConfigAfterRemove, nullValue()); }
public static <T> Collection<T> load(Class<T> contract, ClassLoader... loaders) { Map<String, T> services = new LinkedHashMap<>(); if (loaders.length == 0) { try { ServiceLoader<T> loadedServices = ServiceLoader.load(contract); addServices(loadedServices, services); } catch (Exception e) { // Ignore } } else { for (ClassLoader loader : loaders) { if (loader == null) throw new NullPointerException(); try { ServiceLoader<T> loadedServices = ServiceLoader.load(contract, loader); addServices(loadedServices, services); } catch (Exception e) { // Ignore } } } if (services.isEmpty()) { LOG.debugf("No service impls found: %s", contract.getSimpleName()); } return services.values(); }
@Test public void testDuplicateServiceFinder() { ClassLoader mainClassLoader = this.getClass().getClassLoader(); ClassLoader otherClassLoader = new ClonedClassLoader(mainClassLoader); Collection<SampleSPI> spis = ServiceFinder.load(SampleSPI.class, mainClassLoader, otherClassLoader); assertEquals(1, spis.size()); }
public static <T> List<LocalProperty<T>> sorted(Collection<T> columns, SortOrder order) { return columns.stream().map(column -> new SortingProperty<>(column, order)).collect(toImmutableList()); }
@Test public void testDifferentSortOrders() { List<LocalProperty<String>> actual = builder() .sorted("a", SortOrder.ASC_NULLS_FIRST) .build(); assertMatch( actual, builder() .sorted("a", SortOrder.ASC_NULLS_LAST) .build(), Optional.of(sorted("a", SortOrder.ASC_NULLS_LAST))); }
@Override public PageResult<ArticleCategoryDO> getArticleCategoryPage(ArticleCategoryPageReqVO pageReqVO) { return articleCategoryMapper.selectPage(pageReqVO); }
@Test @Disabled // TODO 请修改 null 为需要的值,然后删除 @Disabled 注解 public void testGetArticleCategoryPage() { // mock 数据 ArticleCategoryDO dbArticleCategory = randomPojo(ArticleCategoryDO.class, o -> { // 等会查询到 o.setName(null); o.setPicUrl(null); o.setStatus(null); o.setSort(null); o.setCreateTime(null); }); articleCategoryMapper.insert(dbArticleCategory); // 测试 name 不匹配 articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setName(null))); // 测试 picUrl 不匹配 articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setPicUrl(null))); // 测试 status 不匹配 articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setStatus(null))); // 测试 sort 不匹配 articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setSort(null))); // 测试 createTime 不匹配 articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setCreateTime(null))); // 准备参数 ArticleCategoryPageReqVO reqVO = new ArticleCategoryPageReqVO(); reqVO.setName(null); reqVO.setStatus(null); reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28)); // 调用 PageResult<ArticleCategoryDO> pageResult = articleCategoryService.getArticleCategoryPage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbArticleCategory, pageResult.getList().get(0)); }
@Override public Plugin getPluginInstance(String key) { checkState(started.get(), NOT_STARTED_YET); Plugin plugin = pluginInstancesByKeys.get(key); checkArgument(plugin != null, "Plugin [%s] does not exist", key); return plugin; }
@Test public void getPluginInstance_throws_ISE_if_repo_is_not_started() { assertThatThrownBy(() -> underTest.getPluginInstance("foo")) .isInstanceOf(IllegalStateException.class) .hasMessage("not started yet"); }
@Override public SCMPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) { SCMPropertyConfiguration scmConfiguration = extension.getSCMConfiguration(descriptor.id()); SCMView scmView = extension.getSCMView(descriptor.id()); PluggableInstanceSettings pluginSettingsAndView = getPluginSettingsAndView(descriptor, extension); if (scmConfiguration == null) { throw new RuntimeException(format("Plugin[%s] returned null scm configuration", descriptor.id())); } if (scmView == null) { throw new RuntimeException(format("Plugin[%s] returned null scm view", descriptor.id())); } PluggableInstanceSettings scmSettings = new PluggableInstanceSettings(scmPluginConfigurations(scmConfiguration), new PluginView(scmView.template())); return new SCMPluginInfo(descriptor, scmView.displayValue(), scmSettings, pluginSettingsAndView); }
@Test public void shouldThrowAnExceptionIfScmViewReturnedByPluginIsNull() { GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build(); when(extension.getSCMView("plugin1")).thenReturn(null); assertThatThrownBy(() -> new SCMPluginInfoBuilder(extension).pluginInfoFor(descriptor)) .hasMessage("Plugin[plugin1] returned null scm view"); }
@Override public void getChildren(final String path, final boolean watch, final AsyncCallback.ChildrenCallback cb, final Object ctx) { if (!SymlinkUtil.containsSymlink(path)) { _zk.getChildren(path, watch, cb, ctx); } else { SymlinkChildrenCallback compositeCallback = new SymlinkChildrenCallback(path, _defaultWatcher, cb); getChildren0(path, watch ? compositeCallback : null, compositeCallback, ctx); } }
@Test public void testSymlinkGetChildren() throws InterruptedException, ExecutionException, IOException { final CountDownLatch latch = new CountDownLatch(1); AsyncCallback.ChildrenCallback callback = new AsyncCallback.ChildrenCallback() { @Override public void processResult(int rc, String path, Object ctx, List<String> children) { KeeperException.Code result = KeeperException.Code.get(rc); Assert.assertEquals(result, KeeperException.Code.OK); Assert.assertEquals(path, "/foo/$link"); Assert.assertEquals(children.size(), 10); latch.countDown(); } }; // symlink: /foo/$link -> /foo/bar _zkClient.getZooKeeper().getChildren("/foo/$link", null, callback, null); latch.await(30, TimeUnit.SECONDS); }
int countHopsForTopic(String topic, String sourceClusterAlias) { int hops = 0; Set<String> visited = new HashSet<>(); while (true) { hops++; String source = replicationPolicy.topicSource(topic); if (source == null) { return -1; } if (source.equals(sourceClusterAlias)) { return hops; } if (visited.contains(source)) { // Extra check for IdentityReplicationPolicy and similar impls that cannot prevent cycles. // We assume we're stuck in a cycle and will never find sourceClusterAlias. return -1; } visited.add(source); topic = replicationPolicy.upstreamTopic(topic); } }
@Test public void countHopsForTopicTest() { MirrorClient client = new FakeMirrorClient(); assertEquals(-1, client.countHopsForTopic("topic", "source")); assertEquals(-1, client.countHopsForTopic("source", "source")); assertEquals(-1, client.countHopsForTopic("sourcetopic", "source")); assertEquals(-1, client.countHopsForTopic("source1.topic", "source2")); assertEquals(1, client.countHopsForTopic("source1.topic", "source1")); assertEquals(1, client.countHopsForTopic("source2.source1.topic", "source2")); assertEquals(2, client.countHopsForTopic("source2.source1.topic", "source1")); assertEquals(3, client.countHopsForTopic("source3.source2.source1.topic", "source1")); assertEquals(-1, client.countHopsForTopic("source3.source2.source1.topic", "source4")); }
@Override public void preflight(final Path workdir, final String filename) throws BackgroundException { if(!validate(filename)) { throw new InvalidFilenameException(MessageFormat.format(LocaleFactory.localizedString("Cannot create folder {0}", "Error"), filename)); } assumeRole(workdir, filename, CREATEDIRECTORIESPERMISSION); }
@Test public void testPreflightFileMissingCustomProps() throws Exception { final Path workdir = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)); workdir.setAttributes(workdir.attributes().withAcl(Acl.EMPTY)); new CteraDirectoryFeature(session).preflight(workdir, new AlphanumericRandomStringService().random()); }
public static Object typeConvert(String tableName ,String columnName, String value, int sqlType, String mysqlType) { if (value == null || (value.equals("") && !(isText(mysqlType) || sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.LONGVARCHAR))) { return null; } try { Object res; switch (sqlType) { case Types.INTEGER: res = Integer.parseInt(value); break; case Types.SMALLINT: res = Short.parseShort(value); break; case Types.BIT: case Types.TINYINT: res = Byte.parseByte(value); break; case Types.BIGINT: if (mysqlType.startsWith("bigint") && mysqlType.endsWith("unsigned")) { res = new BigInteger(value); } else { res = Long.parseLong(value); } break; // case Types.BIT: case Types.BOOLEAN: res = !"0".equals(value); break; case Types.DOUBLE: case Types.FLOAT: res = Double.parseDouble(value); break; case Types.REAL: res = Float.parseFloat(value); break; case Types.DECIMAL: case Types.NUMERIC: res = new BigDecimal(value); break; case Types.BINARY: case Types.VARBINARY: case Types.LONGVARBINARY: case Types.BLOB: res = value.getBytes("ISO-8859-1"); break; case Types.DATE: if (!value.startsWith("0000-00-00")) { java.util.Date date = Util.parseDate(value); if (date != null) { res = new Date(date.getTime()); } else { res = null; } } else { res = null; } break; case Types.TIME: { java.util.Date date = Util.parseDate(value); if (date != null) { res = new Time(date.getTime()); } else { res = null; } break; } case Types.TIMESTAMP: if (!value.startsWith("0000-00-00")) { java.util.Date date = Util.parseDate(value); if (date != null) { res = new Timestamp(date.getTime()); } else { res = null; } } else { res = null; } break; case Types.CLOB: default: res = value; break; } return res; } catch (Exception e) { logger.error("table: {} column: {}, failed convert type {} to {}", tableName, columnName, value, sqlType); return value; } }
@Test public void typeConvertInputNotNullNotNullNotNullNegativeNotNullOutputPositive2() { // Arrange final String tableName = "?????????"; final String columnName = "?"; final String value = "2"; final int sqlType = -5; final String mysqlType = "bigint"; // Act final Object actual = JdbcTypeUtil.typeConvert(tableName, columnName, value, sqlType, mysqlType); // Assert result Assert.assertEquals(2L, actual); }
public List<String> listAllNodes(String path) { return zkClient.listAllNodes(path); }
@Test public void testListAllNodes() { List<String> result = zooKeeperBufferedClient.listAllNodes(PARENT_PATH); Assert.assertEquals(Arrays.asList(CHILD_ONE_PATH, CHILE_TWO_PATh), result); }
@Override public void deleteTopics(final Collection<String> topicsToDelete) { if (topicsToDelete.isEmpty()) { return; } final DeleteTopicsResult deleteTopicsResult = adminClient.get().deleteTopics(topicsToDelete); final Map<String, KafkaFuture<Void>> results = deleteTopicsResult.topicNameValues(); final List<String> failList = Lists.newArrayList(); final List<Pair<String, Throwable>> exceptionList = Lists.newArrayList(); for (final Map.Entry<String, KafkaFuture<Void>> entry : results.entrySet()) { try { entry.getValue().get(30, TimeUnit.SECONDS); } catch (final Exception e) { final Throwable rootCause = ExceptionUtils.getRootCause(e); if (rootCause instanceof TopicDeletionDisabledException) { throw new TopicDeletionDisabledException("Topic deletion is disabled. " + "To delete the topic, you must set '" + DELETE_TOPIC_ENABLE + "' to true in " + "the Kafka broker configuration."); } else if (rootCause instanceof TopicAuthorizationException) { throw new KsqlTopicAuthorizationException( AclOperation.DELETE, Collections.singleton(entry.getKey())); } else if (!(rootCause instanceof UnknownTopicOrPartitionException)) { LOG.error(String.format("Could not delete topic '%s'", entry.getKey()), e); failList.add(entry.getKey()); exceptionList.add(new Pair<>(entry.getKey(), rootCause)); } } } if (!failList.isEmpty()) { throw new KafkaDeleteTopicsException("Failed to clean up topics: " + String.join(",", failList), exceptionList); } }
@Test public void shouldDeleteTopics() { // When: kafkaTopicClient.deleteTopics(ImmutableSet.of("the-topic")); // Then: verify(adminClient).deleteTopics(ImmutableSet.of("the-topic")); }
public static ExistsQueryBuilder existsQuery(String name) { return new ExistsQueryBuilder(name); }
@Test public void testExistsQuery() throws Exception { assertEquals("{\"exists\":{\"field\":\"k\"}}", toJson(QueryBuilders.existsQuery("k"))); }
public static Application fromServicesXml(String xml, Networking networking) { Path applicationDir = StandaloneContainerRunner.createApplicationPackage(xml); return new Application(applicationDir, networking, true); }
@Test void http_interface_is_off_when_networking_is_disabled() throws Exception { assertThrows(ConnectException.class, () -> { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); int statusCode = client.execute(new HttpGet("http://localhost:" + httpPort)).getStatusLine().getStatusCode(); fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode); Application unused = application; } }); }
@Override public void deleteSocialClient(Long id) { // 校验存在 validateSocialClientExists(id); // 删除 socialClientMapper.deleteById(id); }
@Test public void testDeleteSocialClient_success() { // mock 数据 SocialClientDO dbSocialClient = randomPojo(SocialClientDO.class); socialClientMapper.insert(dbSocialClient);// @Sql: 先插入出一条存在的数据 // 准备参数 Long id = dbSocialClient.getId(); // 调用 socialClientService.deleteSocialClient(id); // 校验数据不存在了 assertNull(socialClientMapper.selectById(id)); }
static Map<String, Expression> getPredictorTermFunctions(final List<PredictorTerm> predictorTerms) { predictorsArity.set(0); return predictorTerms.stream() .map(predictorTerm -> { int arity = predictorsArity.addAndGet(1); String variableName = predictorTerm.getName() != null ?predictorTerm.getName() : "predictorTermFunction" + arity; return new AbstractMap.SimpleEntry<>(variableName, getPredictorTermFunction(predictorTerm)); }) .collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey, AbstractMap.SimpleEntry::getValue)); }
@Test void getPredictorTermFunctions() { final List<PredictorTerm> predictorTerms = IntStream.range(0, 3).mapToObj(index -> { String predictorName = "predictorName-" + index; double coefficient = 1.23 * index; String fieldRef = "fieldRef-" + index; return PMMLModelTestUtils.getPredictorTerm(predictorName, coefficient, Collections.singletonList(fieldRef)); }).collect(Collectors.toList()); Map<String, Expression> retrieved = KiePMMLRegressionTableFactory.getPredictorTermFunctions(predictorTerms); assertThat(retrieved).hasSameSizeAs(predictorTerms); IntStream.range(0, predictorTerms.size()).forEach(index -> { PredictorTerm predictorTerm = predictorTerms.get(index); assertThat(retrieved).containsKey(predictorTerm.getName()); }); }
public List<List<Object>> readParameterSets(final List<PostgreSQLColumnType> parameterTypes) { List<List<Object>> result = new ArrayList<>(batchNum); for (int i = 0; i < batchNum; i++) { result.add(readOneGroupOfParameters(parameterTypes)); } payload.skipReserved(payload.getByteBuf().readableBytes()); return result; }
@Test void assertConstructOpenGaussComBatchBindPacket() { PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(Unpooled.wrappedBuffer(BATCH_BIND_MESSAGE_BYTES), StandardCharsets.UTF_8); assertThat(payload.readInt1(), is((int) 'U')); OpenGaussComBatchBindPacket actual = new OpenGaussComBatchBindPacket(payload); assertThat(actual.getStatementId(), is("S_1")); assertThat(actual.getEachGroupParametersCount(), is(3)); assertThat(actual.getParameterFormats(), is(Arrays.asList(0, 0, 0))); assertTrue(actual.getResultFormats().isEmpty()); List<List<Object>> actualParameterSets = actual.readParameterSets( Arrays.asList(PostgreSQLColumnType.INT4, PostgreSQLColumnType.VARCHAR, PostgreSQLColumnType.INT4)); assertThat(actualParameterSets.size(), is(3)); List<List<Object>> expectedParameterSets = Arrays.asList(Arrays.asList(1, "Foo", 18), Arrays.asList(2, "Bar", 36), Arrays.asList(3, "Tom", 54)); assertThat(actualParameterSets, is(expectedParameterSets)); }
public static Schema project(Schema schema, Set<Integer> fieldIds) { Preconditions.checkNotNull(schema, "Schema cannot be null"); Types.StructType result = project(schema.asStruct(), fieldIds); if (schema.asStruct().equals(result)) { return schema; } else if (result != null) { if (schema.getAliases() != null) { return new Schema(result.fields(), schema.getAliases()); } else { return new Schema(result.fields()); } } return new Schema(Collections.emptyList(), schema.getAliases()); }
// Verifies projection behavior for map types: map ids cannot be projected explicitly,
// while projecting through a map keeps its key struct intact at every nesting depth.
@Test
public void testProjectMap() {
    // We can't partially project keys because it changes key equality
    Schema schema =
        new Schema(
            Lists.newArrayList(
                required(10, "a", Types.IntegerType.get()),
                required(11, "A", Types.IntegerType.get()),
                required(
                    12,
                    "map",
                    Types.MapType.ofRequired(
                        13,
                        14,
                        Types.StructType.of(
                            optional(100, "x", Types.IntegerType.get()),
                            optional(101, "y", Types.IntegerType.get())),
                        Types.StructType.of(
                            required(200, "z", Types.IntegerType.get()),
                            optional(
                                201,
                                "innerMap",
                                Types.MapType.ofOptional(
                                    202,
                                    203,
                                    Types.IntegerType.get(),
                                    Types.StructType.of(
                                        required(300, "foo", Types.IntegerType.get()),
                                        required(301, "bar", Types.IntegerType.get())))))))));
    // Explicitly selecting a map id (outer or inner) must be rejected.
    assertThatThrownBy(() -> TypeUtil.project(schema, Sets.newHashSet(12)))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining("Cannot explicitly project List or Map types");
    assertThatThrownBy(() -> TypeUtil.project(schema, Sets.newHashSet(201)))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining("Cannot explicitly project List or Map types");
    // Top-level projection keeps only the selected scalar.
    Schema expectedTopLevel = new Schema(Lists.newArrayList(required(10, "a", Types.IntegerType.get())));
    Schema actualTopLevel = TypeUtil.project(schema, Sets.newHashSet(10));
    assertThat(actualTopLevel.asStruct()).isEqualTo(expectedTopLevel.asStruct());
    // Depth-one projection: the map value struct is emptied, but the key struct is kept
    // whether or not its field ids are explicitly selected.
    Schema expectedDepthOne =
        new Schema(
            Lists.newArrayList(
                required(10, "a", Types.IntegerType.get()),
                required(
                    12,
                    "map",
                    Types.MapType.ofRequired(
                        13,
                        14,
                        Types.StructType.of(
                            optional(100, "x", Types.IntegerType.get()),
                            optional(101, "y", Types.IntegerType.get())),
                        Types.StructType.of()))));
    Schema actualDepthOne = TypeUtil.project(schema, Sets.newHashSet(10, 13, 14, 100, 101));
    Schema actualDepthOneNoKeys = TypeUtil.project(schema, Sets.newHashSet(10, 13, 14));
    assertThat(actualDepthOne.asStruct()).isEqualTo(expectedDepthOne.asStruct());
    assertThat(actualDepthOneNoKeys.asStruct()).isEqualTo(expectedDepthOne.asStruct());
    // Depth-two projection: the inner map survives with an empty value struct.
    Schema expectedDepthTwo =
        new Schema(
            Lists.newArrayList(
                required(10, "a", Types.IntegerType.get()),
                required(
                    12,
                    "map",
                    Types.MapType.ofRequired(
                        13,
                        14,
                        Types.StructType.of(
                            optional(100, "x", Types.IntegerType.get()),
                            optional(101, "y", Types.IntegerType.get())),
                        Types.StructType.of(
                            required(200, "z", Types.IntegerType.get()),
                            optional(
                                201,
                                "innerMap",
                                Types.MapType.ofOptional(
                                    202, 203, Types.IntegerType.get(), Types.StructType.of())))))));
    Schema actualDepthTwo = TypeUtil.project(schema, Sets.newHashSet(10, 13, 14, 100, 101, 200, 202, 203));
    assertThat(actualDepthTwo.asStruct()).isEqualTo(expectedDepthTwo.asStruct());
}
public static String randomUUID() { return toString(UUID.randomUUID(), false); }
@Test public void randomUUIDTest() { String randomUUID = IdUtil.randomUUID(); Assert.assertNotNull(randomUUID); }
public Iterator<List<Partition>> getNewPartsIterator(int batchSize) { return iteratePartitions(new_parts, batchSize); }
@Test public void testGetNewPartitionsIterator() { int batch = 0; List<Partition> actual = new ArrayList<>(); Map<Long, List<Partition>> idToParts = new HashMap<>(); Iterator<List<Partition>> iterator = event.getNewPartsIterator(batchSize); while (iterator.hasNext()) { List<Partition> partitions = iterator.next(); Assert.assertTrue(batchSize <=0 || partitions.size() <= batchSize); Long writeId = null; for (Partition part : partitions) { if (writeId == null) { writeId = part.getWriteId(); } else { Assert.assertEquals(writeId.longValue(), part.getWriteId()); } } idToParts.putIfAbsent(writeId, new ArrayList<>()); idToParts.get(writeId).addAll(partitions); batch++; actual.addAll(partitions); } Assert.assertEquals(5, idToParts.get(1L).size()); Assert.assertEquals(5, idToParts.get(2L).size()); Assert.assertEquals(writeIdToParts, idToParts); Assert.assertEquals(expectedBatch, batch); Assert.assertEquals(expectedParts, actual); }
@Override public boolean isQualified(final SQLStatementContext sqlStatementContext, final ReadwriteSplittingDataSourceGroupRule rule, final HintValueContext hintValueContext) { return isPrimaryRoute(sqlStatementContext, hintValueContext); }
@Test void assertWriteRouteStatement() { MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class); when(selectStatement.getLock()).thenReturn(Optional.of(new LockSegment(0, 1))); when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement); assertTrue(new QualifiedReadwriteSplittingPrimaryDataSourceRouter().isQualified(sqlStatementContext, null, hintValueContext)); when(sqlStatementContext.getSqlStatement()).thenReturn(mock(MySQLUpdateStatement.class)); assertTrue(new QualifiedReadwriteSplittingPrimaryDataSourceRouter().isQualified(sqlStatementContext, null, hintValueContext)); }
@Override public Mono<GetUnversionedProfileResponse> getUnversionedProfile(final GetUnversionedProfileAnonymousRequest request) { final ServiceIdentifier targetIdentifier = ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getRequest().getServiceIdentifier()); // Callers must be authenticated to request unversioned profiles by PNI if (targetIdentifier.identityType() == IdentityType.PNI) { throw Status.UNAUTHENTICATED.asRuntimeException(); } final Mono<Account> account = switch (request.getAuthenticationCase()) { case GROUP_SEND_TOKEN -> groupSendTokenUtil.checkGroupSendToken(request.getGroupSendToken(), List.of(targetIdentifier)) .then(Mono.fromFuture(() -> accountsManager.getByServiceIdentifierAsync(targetIdentifier))) .flatMap(Mono::justOrEmpty) .switchIfEmpty(Mono.error(Status.NOT_FOUND.asException())); case UNIDENTIFIED_ACCESS_KEY -> getTargetAccountAndValidateUnidentifiedAccess(targetIdentifier, request.getUnidentifiedAccessKey().toByteArray()); default -> Mono.error(Status.INVALID_ARGUMENT.asException()); }; return account.map(targetAccount -> ProfileGrpcHelper.buildUnversionedProfileResponse(targetIdentifier, null, targetAccount, profileBadgeConverter)); }
// Verifies that a valid, unexpired group-send token authorizes fetching an unversioned
// profile and that the response mirrors the stubbed account's identity key and badges.
@Test
void getUnversionedProfileGroupSendEndorsement() throws Exception {
    final UUID targetUuid = UUID.randomUUID();
    final org.whispersystems.textsecuregcm.identity.ServiceIdentifier serviceIdentifier = new AciServiceIdentifier(targetUuid);
    // Expiration must be on a day boundary; we want one in the future
    final Instant expiration = Instant.now().plus(Duration.ofDays(1)).truncatedTo(ChronoUnit.DAYS);
    final byte[] token = AuthHelper.validGroupSendToken(SERVER_SECRET_PARAMS, List.of(serviceIdentifier), expiration);
    final ECKeyPair identityKeyPair = Curve.generateKeyPair();
    final IdentityKey identityKey = new IdentityKey(identityKeyPair.getPublicKey());
    final List<Badge> badges = List.of(new Badge(
        "TEST",
        "other",
        "Test Badge",
        "This badge is in unit tests.",
        List.of("l", "m", "h", "x", "xx", "xxx"),
        "SVG",
        List.of(
            new BadgeSvg("sl", "sd"),
            new BadgeSvg("ml", "md"),
            new BadgeSvg("ll", "ld")))
    );
    // Stub the target account and the badge conversion used to build the response.
    when(account.getBadges()).thenReturn(Collections.emptyList());
    when(profileBadgeConverter.convert(any(), any(), anyBoolean())).thenReturn(badges);
    when(account.isUnrestrictedUnidentifiedAccess()).thenReturn(false);
    when(account.getIdentityKey(org.whispersystems.textsecuregcm.identity.IdentityType.ACI)).thenReturn(identityKey);
    when(accountsManager.getByServiceIdentifierAsync(serviceIdentifier)).thenReturn(CompletableFuture.completedFuture(Optional.of(account)));
    final GetUnversionedProfileAnonymousRequest request = GetUnversionedProfileAnonymousRequest.newBuilder()
        .setGroupSendToken(ByteString.copyFrom(token))
        .setRequest(GetUnversionedProfileRequest.newBuilder()
            .setServiceIdentifier(
                ServiceIdentifierUtil.toGrpcServiceIdentifier(serviceIdentifier))
            .build())
        .build();
    final GetUnversionedProfileResponse response = unauthenticatedServiceStub().getUnversionedProfile(request);
    final GetUnversionedProfileResponse expectedResponse = GetUnversionedProfileResponse.newBuilder()
        .setIdentityKey(ByteString.copyFrom(identityKey.serialize()))
        .setUnrestrictedUnidentifiedAccess(false)
        .setCapabilities(ProfileGrpcHelper.buildUserCapabilities(UserCapabilities.createForAccount(account)))
        .addAllBadges(ProfileGrpcHelper.buildBadges(badges))
        .build();
    verify(accountsManager).getByServiceIdentifierAsync(serviceIdentifier);
    assertEquals(expectedResponse, response);
}
@Around(value = "setAuditSpan(auditLog)") public Object around(ProceedingJoinPoint pjp, ApolloAuditLog auditLog) throws Throwable { String opName = auditLog.name(); try (AutoCloseable scope = api.appendAuditLog(auditLog.type(), opName, auditLog.description())) { Object proceed = pjp.proceed(); auditDataInfluenceArg(pjp); return proceed; } }
@Test public void testAround() throws Throwable { final OpType opType = OpType.CREATE; final String opName = "App.create"; final String description = "no description"; ProceedingJoinPoint mockPJP = mock(ProceedingJoinPoint.class); ApolloAuditLog mockAnnotation = mock(ApolloAuditLog.class); AutoCloseable mockScope = mock(AutoCloseable.class); { when(mockAnnotation.type()).thenReturn(opType); when(mockAnnotation.name()).thenReturn(opName); when(mockAnnotation.description()).thenReturn(description); when(api.appendAuditLog(eq(opType), eq(opName), eq(description))) .thenReturn(mockScope); doNothing().when(aspect).auditDataInfluenceArg(mockPJP); } aspect.around(mockPJP, mockAnnotation); verify(api, times(1)) .appendAuditLog(eq(opType), eq(opName), eq(description)); verify(mockScope, times(1)) .close(); verify(aspect, times(1)) .auditDataInfluenceArg(eq(mockPJP)); }
Path getDeviceProfileDefaultRuleChainTemplateFilePath() { return Paths.get(getDataDir(), JSON_DIR, TENANT_DIR, DEVICE_PROFILE_DIR, "rule_chain_template.json"); }
@Test void testDeviceProfileDefaultRuleChainTemplate() { validateRuleChainTemplate(installScripts.getDeviceProfileDefaultRuleChainTemplateFilePath()); }
public static boolean isNotBlank(CharSequence str) { return !isBlank(str); }
@Test public void isNotBlank() { String string = "null"; Assert.assertTrue(StringUtil.isNotBlank(string)); }
@SuppressWarnings("unchecked") public OAuthBearerToken validate(String accessToken) throws ValidateException { SerializedJwt serializedJwt = new SerializedJwt(accessToken); JwtContext jwt; try { jwt = jwtConsumer.process(serializedJwt.getToken()); } catch (InvalidJwtException e) { throw new ValidateException(String.format("Could not validate the access token: %s", e.getMessage()), e); } JwtClaims claims = jwt.getJwtClaims(); Object scopeRaw = getClaim(() -> claims.getClaimValue(scopeClaimName), scopeClaimName); Collection<String> scopeRawCollection; if (scopeRaw instanceof String) scopeRawCollection = Collections.singletonList((String) scopeRaw); else if (scopeRaw instanceof Collection) scopeRawCollection = (Collection<String>) scopeRaw; else scopeRawCollection = Collections.emptySet(); NumericDate expirationRaw = getClaim(claims::getExpirationTime, ReservedClaimNames.EXPIRATION_TIME); String subRaw = getClaim(() -> claims.getStringClaimValue(subClaimName), subClaimName); NumericDate issuedAtRaw = getClaim(claims::getIssuedAt, ReservedClaimNames.ISSUED_AT); Set<String> scopes = ClaimValidationUtils.validateScopes(scopeClaimName, scopeRawCollection); long expiration = ClaimValidationUtils.validateExpiration(ReservedClaimNames.EXPIRATION_TIME, expirationRaw != null ? expirationRaw.getValueInMillis() : null); String sub = ClaimValidationUtils.validateSubject(subClaimName, subRaw); Long issuedAt = ClaimValidationUtils.validateIssuedAt(ReservedClaimNames.ISSUED_AT, issuedAtRaw != null ? issuedAtRaw.getValueInMillis() : null); return new BasicOAuthBearerToken(accessToken, scopes, expiration, sub, issuedAt); }
/**
 * A token carrying its subject only in a custom claim ({@code client_id}) with no standard
 * {@code sub} claim must still validate when the validator is configured with that claim name.
 */
@Test
public void testMissingSubShouldBeValid() throws Exception {
    String subClaimName = "client_id";
    String subject = "otherSub";
    PublicJsonWebKey jwk = createRsaJwk();
    // subject(null) suppresses the standard "sub" claim; the custom claim carries the subject.
    AccessTokenBuilder tokenBuilder = new AccessTokenBuilder()
        .jwk(jwk)
        .alg(AlgorithmIdentifiers.RSA_USING_SHA256)
        .addCustomClaim(subClaimName, subject)
        .subjectClaimName(subClaimName)
        .subject(null);
    AccessTokenValidator validator = createAccessTokenValidator(tokenBuilder);
    // Validation should succeed (e.g. signature verification) even if sub claim is missing
    OAuthBearerToken token = validator.validate(tokenBuilder.build());
    assertEquals(subject, token.principalName());
}
/**
 * Generates one trace segment for the given context, returning both the storage entity
 * ({@code Segment}) and the gRPC payload ({@code SegmentObject}).
 * NOTE(review): the shared generators (now/endpoint/spans/tags/error) appear stateful;
 * call order below matters — confirm before reordering.
 */
@Override
public SegmentResult next(SegmentContext ctx) {
    long n = now.next(null);
    final String serviceName = ctx.serviceName;
    final String serviceInstanceName = ctx.serviceInstanceName;
    final String endpointName = getEndpointName().next(null);
    // Lazily build a 20-char alphanumeric generator for segment ids on first use.
    if (segmentId == null) {
        StringGenerator.Builder segmentIdBuilder = new StringGenerator.Builder();
        segmentIdBuilder.setLength(20);
        segmentIdBuilder.setNumbers(true);
        segmentIdBuilder.setLetters(true);
        segmentId = segmentIdBuilder.build();
    }
    // When a parent segment exists, reference its first span that has a peer address set;
    // otherwise this is a root segment and sr stays null.
    final SegmentReference sr = Optional.ofNullable(ctx.parentSegment).flatMap(parentSegment ->
        parentSegment.segmentObject.getSpansList().stream()
            .filter(span -> !Strings.isNullOrEmpty(span.getPeer()))
            .findFirst().map(span -> SegmentReference
                .newBuilder()
                .setTraceId(ctx.traceId)
                .setParentServiceInstance(parentSegment.segmentObject.getServiceInstance())
                .setParentService(parentSegment.segmentObject.getService())
                .setParentSpanId(span.getSpanId())
                .setParentTraceSegmentId(parentSegment.segment.getSegmentId())
                .setParentEndpoint(IDManager.EndpointID.analysisId(parentSegment.segment.getEndpointId()).getEndpointName())
                .setNetworkAddressUsedAtPeer(serviceInstanceName)
                .build()))
        .orElse(null);
    // Fresh segment id for this segment (local shadows the generator field above).
    final String segmentId = getSegmentId().next(null);
    final List<SpanGenerator> spanGenerators = getSpans().next(null);
    int size = spanGenerators.size();
    final SegmentObject segmentObj = SegmentObject
        .newBuilder()
        .setTraceId(ctx.traceId)
        .setTraceSegmentId(segmentId)
        .addAllSpans(
            IntStream.range(0, size)
                .mapToObj(i -> {
                    SpanGenerator sg = spanGenerators.get(i);
                    return sg.next(new SpanGenerator.SpanGeneratorContext(i, size, sr, ctx.peer, n));
                })
                .collect(Collectors.<SpanObject>toList()))
        .setService(serviceName)
        .setServiceInstance(serviceInstanceName)
        .build();
    // Reset the span generator to generate the span id from 0
    getSpans().reset();
    // Total latency = sum of each span's (end - start).
    Long latency = segmentObj.getSpansList().stream().reduce(0L,
        (l, span) -> l + (span.getEndTime() - span.getStartTime()), Long::sum);
    // Build the storage-side entity mirroring the gRPC object.
    final Segment segment = new Segment();
    segment.setSegmentId(segmentId);
    segment.setTraceId(ctx.traceId);
    segment.setServiceId(
        IDManager.ServiceID.buildId(serviceName, true));
    segment.setServiceInstanceId(
        IDManager.ServiceInstanceID.buildId(
            segment.getServiceId(), serviceInstanceName));
    segment.setEndpointId(
        IDManager.EndpointID.buildId(
            segment.getServiceId(), endpointName));
    // "n" is the segment end time, so start = end - latency.
    segment.setStartTime(n - latency);
    segment.setLatency(latency.intValue());
    segment.setIsError(getError().next(null).intValue());
    segment.setTimeBucket(TimeBucket.getRecordTimeBucket(segment.getStartTime()));
    segment.setTags(
        getTags()
            .next(null)
            .stream()
            .map(tg -> tg.next(null))
            .collect(Collectors.<Tag>toList()));
    return new SegmentResult(segment, segmentObj);
}
/**
 * Drives the segment generator from the bundled template and checks that the generated
 * services / instances / endpoints vary but stay within the template's configured bounds.
 */
@Test
void next() throws URISyntaxException, IOException {
    ObjectMapper objectMapper = new ObjectMapper();
    URL url = getClass().getClassLoader().getResource("segment.tpl.json");
    assertNotNull(url);
    File jsonFile = new File(url.toURI());
    SegmentRequest sr = objectMapper.readValue(jsonFile, SegmentRequest.class);
    sr.init("");
    Set<String> serviceSet = new HashSet<>();
    Set<String> serviceInstanceSet = new HashSet<>();
    Set<String> endpointSet = new HashSet<>();
    // Sample many generations so the distinct-value bounds below are meaningful.
    for (int i = 0; i < 1000; i++) {
        List<SegmentGenerator.SegmentResult> ss = sr.next(null);
        assertFalse(ss.isEmpty());
        for (SegmentGenerator.SegmentResult s : ss) {
            serviceSet.add(s.segmentObject.getService());
            serviceInstanceSet.add(s.segmentObject.getServiceInstance());
            endpointSet.add(s.segment.getEndpointId());
        }
    }
    // Bounds come from the template: <=10 services, <=100 instances, <=100 endpoints.
    assertTrue(serviceSet.size() > 1);
    assertTrue(serviceSet.size() <= 10);
    assertTrue(serviceInstanceSet.size() > 1);
    assertTrue(serviceInstanceSet.size() <= 100);
    assertTrue(endpointSet.size() > 1);
    assertTrue(endpointSet.size() <= 100);
}
/**
 * Checks, per module, that every bundled dependency is correctly flagged optional.
 *
 * @param bundledDependenciesByModule dependencies bundled by the shade-plugin, per module
 * @param dependenciesByModule dependency trees reported by the dependency-plugin, per module
 * @return per-module violation sets; modules without violations are omitted
 * @throws IllegalStateException if a module known to the shade-plugin is missing from
 *         the dependency-plugin output (plugin outputs out of sync)
 */
private static Map<String, Set<Dependency>> checkOptionalFlags(
        Map<String, Set<Dependency>> bundledDependenciesByModule,
        Map<String, DependencyTree> dependenciesByModule) {
    final Map<String, Set<Dependency>> allViolations = new HashMap<>();
    // Iterate entries instead of keySet() to avoid a second lookup per module.
    for (Map.Entry<String, Set<Dependency>> entry : bundledDependenciesByModule.entrySet()) {
        final String module = entry.getKey();
        LOG.debug("Checking module '{}'.", module);
        final DependencyTree dependencyTree = dependenciesByModule.get(module);
        if (dependencyTree == null) {
            throw new IllegalStateException(
                    String.format(
                            "Module %s listed by shade-plugin, but not dependency-plugin.",
                            module));
        }
        final Set<Dependency> violations =
                checkOptionalFlags(module, entry.getValue(), dependencyTree);
        if (violations.isEmpty()) {
            LOG.info("OK: {}", module);
        } else {
            allViolations.put(module, violations);
        }
    }
    return allViolations;
}
@Test
void testDirectBundledDependencyMustBeOptional() {
    // A dependency bundled directly without the optional flag must be reported as a violation.
    final Dependency mandatory = createMandatoryDependency("a");
    final DependencyTree tree = new DependencyTree().addDirectDependency(mandatory);
    final Set<Dependency> violations =
            ShadeOptionalChecker.checkOptionalFlags(MODULE, Collections.singleton(mandatory), tree);
    assertThat(violations).containsExactly(mandatory);
}
/**
 * Creates an unbounded source counting up from 0 by 1, assigning each element a timestamp
 * via {@code timestampFn}.
 * NOTE(review): constructor argument meanings (start=0, stride=1, 1L elements-per-period,
 * Duration.ZERO = no rate limiting) are inferred — confirm against UnboundedCountingSource.
 */
@Deprecated
public static UnboundedSource<Long, CounterMark> unboundedWithTimestampFn(
    SerializableFunction<Long, Instant> timestampFn) {
  return new UnboundedCountingSource(0, 1, 1L, Duration.ZERO, timestampFn);
}
/**
 * With ValueAsTimestampFn each element's timestamp equals its value, so the
 * (value - timestamp) diff must be the single constant 0 across all elements.
 */
@Test
@Category(NeedsRunner.class)
public void testUnboundedSourceTimestamps() {
    long numElements = 1000;
    PCollection<Long> input =
        p.apply(
            Read.from(CountingSource.unboundedWithTimestampFn(new ValueAsTimestampFn()))
                .withMaxNumRecords(numElements));
    addCountingAsserts(input, numElements);
    PCollection<Long> diffs =
        input
            .apply("TimestampDiff", ParDo.of(new ElementValueDiff()))
            .apply("DistinctTimestamps", Distinct.create());
    // This assert also confirms that diffs only has one unique value.
    PAssert.thatSingleton(diffs).isEqualTo(0L);
    p.run();
}
public static long getChecksumLength(long size, int bytesPerSum) { //the checksum length is equal to size passed divided by bytesPerSum + //bytes written in the beginning of the checksum file. return ((size + bytesPerSum - 1) / bytesPerSum) * FSInputChecker.CHECKSUM_SIZE + ChecksumFSInputChecker.HEADER_LENGTH; }
@Test
public void testgetChecksumLength() throws Exception {
    // Each row: { expected checksum-file length, data size, bytes-per-checksum }.
    final long[][] cases = {
        {8, 0L, 512},
        {12, 1L, 512},
        {12, 512L, 512},
        {16, 513L, 512},
        {16, 1023L, 512},
        {16, 1024L, 512},
        {408, 100L, 1},
        {4000000000008L, 10000000000000L, 10},
    };
    for (long[] c : cases) {
        assertEquals(c[0], ChecksumFileSystem.getChecksumLength(c[1], (int) c[2]));
    }
}
/**
 * Fits a 1-nearest-neighbor classifier.
 *
 * @param x training samples
 * @param y training labels
 * @param distance the distance metric
 * @return a KNN model with k = 1
 */
public static <T> KNN<T> fit(T[] x, int[] y, Distance<T> distance) {
    final int k = 1;
    return fit(x, y, k, distance);
}
@Test
public void testIris() {
    System.out.println("Iris");
    // Leave-one-out accuracy on Iris for increasing neighborhood sizes.
    final int[] ks = {1, 3, 5, 7};
    final double[] expectedAccuracy = {0.96, 0.96, 0.9667, 0.9667};
    for (int i = 0; i < ks.length; i++) {
        final int k = ks[i];
        ClassificationMetrics metrics =
                LOOCV.classification(Iris.x, Iris.y, (x, y) -> KNN.fit(x, y, k));
        System.out.println(k + "-NN Error: " + metrics);
        assertEquals(expectedAccuracy[i], metrics.accuracy, 1E-4);
    }
}
/** Returns the identifier of this session. */
@Override
public String getSessionId() {
    return this.sessionID;
}
/** Sends an edit-config request to the RUNNING datastore over session1 and expects success. */
@Test
public void testEditConfigRequest() {
    log.info("Starting edit-config async");
    // A usable session must have an id before issuing requests.
    assertNotNull("Incorrect sessionId", session1.getSessionId());
    try {
        assertTrue("NETCONF edit-config command failed",
            session1.editConfig(RUNNING, null, SAMPLE_REQUEST));
    } catch (NetconfException e) {
        e.printStackTrace();
        fail("NETCONF edit-config test failed: " + e.getMessage());
    }
    log.info("Finishing edit-config async");
}
/**
 * Parses filter expressions into a single predicate. Expressions targeting the same field
 * are OR-ed together; the resulting per-field groups are AND-ed.
 *
 * @param filterExpressions raw filter expressions; null/empty yields a match-all predicate
 * @param attributes entity attributes used to resolve each expression's field
 * @return the combined predicate
 */
public Predicate<InMemoryFilterable> parse(final List<String> filterExpressions, final List<EntityAttribute> attributes) {
    // No filters -> nothing to restrict on.
    if (filterExpressions == null || filterExpressions.isEmpty()) {
        return Predicates.alwaysTrue();
    }
    final Map<String, List<Filter>> groupedByField = filterExpressions.stream()
            .map(expr -> singleFilterParser.parseSingleExpression(expr, attributes))
            .collect(groupingBy(Filter::field));
    // OR within each field group, then AND across the groups.
    return groupedByField.values().stream()
            .map(grouped -> grouped.stream()
                    .map(Filter::toPredicate)
                    .collect(Collectors.toList()))
            .map(groupedPredicates -> groupedPredicates.stream().reduce(Predicate::or).orElse(Predicates.alwaysTrue()))
            .reduce(Predicate::and).orElse(Predicates.alwaysTrue());
}
/** An empty filter list must yield the canonical match-all predicate. */
@Test
void returnsAlwaysTruePredicateOnEmptyFilterList() {
    assertThat(toTest.parse(List.of(), List.of()))
            .isEqualTo(Predicates.alwaysTrue());
}
/**
 * Determines the OpenID issuer for the request. If an {@code iss} parameter is present it is
 * validated against the configured whitelist/blacklist; otherwise the caller is redirected to
 * the account chooser with this request's URL as the redirect target.
 *
 * @throws AuthenticationServiceException if the issuer is rejected by the white/blacklist,
 *         or if the configured account-chooser URL is not a valid URI
 */
@Override
public IssuerServiceResponse getIssuer(HttpServletRequest request) {
    // if the issuer is passed in, return that
    String iss = request.getParameter("iss");
    if (!Strings.isNullOrEmpty(iss)) {
        // An empty whitelist means "allow any issuer"; a non-empty one is exclusive.
        if (!whitelist.isEmpty() && !whitelist.contains(iss)) {
            throw new AuthenticationServiceException("Whitelist was nonempty, issuer was not in whitelist: " + iss);
        }
        if (blacklist.contains(iss)) {
            throw new AuthenticationServiceException("Issuer was in blacklist: " + iss);
        }
        return new IssuerServiceResponse(iss, request.getParameter("login_hint"), request.getParameter("target_link_uri"));
    } else {
        try {
            // otherwise, need to forward to the account chooser
            String redirectUri = request.getRequestURL().toString();
            URIBuilder builder = new URIBuilder(accountChooserUrl);
            builder.addParameter("redirect_uri", redirectUri);
            return new IssuerServiceResponse(builder.build().toString());
        } catch (URISyntaxException e) {
            throw new AuthenticationServiceException("Account Chooser URL is not valid", e);
        }
    }
}
/**
 * Without an {@code iss} parameter the service must return a redirect to the account chooser
 * carrying the (url-encoded) request URL, and no issuer/hint/target fields.
 */
@Test
public void getIssuer_noIssuer() {
    Mockito.when(request.getParameter("iss")).thenReturn(null);
    IssuerServiceResponse response = service.getIssuer(request);
    assertThat(response.getIssuer(), nullValue());
    assertThat(response.getLoginHint(), nullValue());
    assertThat(response.getTargetLinkUri(), nullValue());
    String expectedRedirectUrl = accountChooserUrl + "?redirect_uri=" + "https%3A%2F%2Fwww.example.com"; // url-encoded string of the request url
    assertThat(response.getRedirectUrl(), equalTo(expectedRedirectUrl));
}
/**
 * Validates this database connection's settings.
 *
 * @return localized remarks for each problem found; empty when the settings are valid
 */
public String[] checkParameters() {
  ArrayList<String> remarks = new ArrayList<>();
  if ( getDatabaseInterface() == null ) {
    remarks.add( BaseMessages.getString( PKG, "DatabaseMeta.BadInterface" ) );
  }
  if ( getName() == null || getName().length() == 0 ) {
    remarks.add( BaseMessages.getString( PKG, "DatabaseMeta.BadConnectionName" ) );
  }
  // BUG FIX: previously this block dereferenced getDatabaseInterface() without a null
  // check, throwing NPE instead of returning the "bad interface" remark alone.
  if ( getDatabaseInterface() != null && !isPartitioned()
    && ( ( (BaseDatabaseMeta) getDatabaseInterface() ).requiresName()
      && !( getDatabaseInterface() instanceof GenericDatabaseMeta ) ) ) {
    if ( getDatabaseName() == null || getDatabaseName().length() == 0 ) {
      remarks.add( BaseMessages.getString( PKG, "DatabaseMeta.BadDatabaseName" ) );
    }
  }
  return remarks.toArray( new String[ remarks.size() ] );
}
/**
 * With no name and no database name (interface requiring one), exactly two remarks
 * must be reported: bad connection name and bad database name.
 */
@Test
public void testCheckParameters() {
    DatabaseMeta meta = mock( DatabaseMeta.class );
    BaseDatabaseMeta databaseInterface = mock( BaseDatabaseMeta.class );
    when( databaseInterface.requiresName() ).thenReturn( true );
    when( meta.getDatabaseInterface() ).thenReturn( databaseInterface );
    when( meta.getName() ).thenReturn( null );
    when( meta.isPartitioned() ).thenReturn( false );
    // Only checkParameters() runs for real; all collaborators above are stubbed.
    when( meta.checkParameters() ).thenCallRealMethod();
    assertEquals( 2, meta.checkParameters().length );
}
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { TemporalAccessor parsed = FEEL_TIME.parse(val); if (parsed.query(TemporalQueries.offset()) != null) { // it is an offset-zoned time, so I can know for certain an OffsetTime OffsetTime asOffSetTime = parsed.query(OffsetTime::from); return FEELFnResult.ofResult(asOffSetTime); } else if (parsed.query(TemporalQueries.zone()) == null) { // if it does not contain any zone information at all, then I know for certain is a local time. LocalTime asLocalTime = parsed.query(LocalTime::from); return FEELFnResult.ofResult(asLocalTime); } else if (parsed.query(TemporalQueries.zone()) != null) { boolean hasSeconds = timeStringWithSeconds(val); LocalTime asLocalTime = parsed.query(LocalTime::from); ZoneId zoneId = parsed.query(TemporalQueries.zone()); ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds); return FEELFnResult.ofResult(zoneTime); } return FEELFnResult.ofResult(parsed); } catch (DateTimeException e) { return manageDateTimeException(e, val); } }
/**
 * Fractional seconds (15.154) must become 154_000_000 nanoseconds, and the duration
 * argument must become the positive/negative hour offset of the resulting OffsetTime.
 */
@Test
void invokeTimeUnitsParamsWithOffsetWithNanoseconds() {
    FunctionTestUtil.assertResult(
        timeFunction.invoke(10, 43, BigDecimal.valueOf(15.154), Duration.ofHours(1)),
        OffsetTime.of(10, 43, 15, 154000000, ZoneOffset.ofHours(1)));
    FunctionTestUtil.assertResult(
        timeFunction.invoke(10, 43, BigDecimal.valueOf(15.154), Duration.ofHours(-1)),
        OffsetTime.of(10, 43, 15, 154000000, ZoneOffset.ofHours(-1)));
}
/**
 * Returns an unmodifiable view of the hooks registered on the current thread,
 * or an immutable empty list when none are registered.
 */
public static List<TransactionHook> getHooks() throws IllegalStateException {
    final List<TransactionHook> registered = LOCAL_HOOKS.get();
    if (registered != null && !registered.isEmpty()) {
        return Collections.unmodifiableList(registered);
    }
    return Collections.emptyList();
}
/** The thread-local hook list starts empty and reflects a registered adapter afterwards. */
@Test
public void testGetHooks() {
    assertThat(TransactionHookManager.getHooks()).isEmpty();
    TransactionHookManager.registerHook(new TransactionHookAdapter());
    assertThat(TransactionHookManager.getHooks()).isNotEmpty();
}
/** Creates an independent copy carrying over the currently aggregated maximum. */
@Override
public IntMaximum clone() {
    final IntMaximum copy = new IntMaximum();
    copy.max = max;
    return copy;
}
@Test
void testClone() {
    final int expected = 42;
    final IntMaximum original = new IntMaximum();
    original.add(expected);
    // The clone must carry over the aggregated maximum from the original.
    final IntMaximum copy = original.clone();
    assertThat(copy.getLocalValue().intValue()).isEqualTo(expected);
}
/**
 * Re-schedules the pipelined regions that contain the given vertices: the affected regions
 * are first marked unscheduled, then handed back to the scheduling logic.
 */
@Override
public void restartTasks(final Set<ExecutionVertexID> verticesToRestart) {
    final Set<SchedulingPipelinedRegion> affectedRegions =
            verticesToRestart.stream()
                    .map(vertexId -> schedulingTopology.getPipelinedRegionOfVertex(vertexId))
                    .collect(Collectors.toSet());

    // Forget that these regions were ever scheduled before re-evaluating them.
    scheduledRegions.removeAll(affectedRegions);
    maybeScheduleRegions(affectedRegions);
}
/**
 * Restarting every vertex must re-schedule all pipelined regions in the expected order:
 * source+map1 pairs first, then the downstream map2/map3 regions individually.
 */
@Test
void testRestartTasks() {
    final PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(testingSchedulingTopology);
    final Set<ExecutionVertexID> verticesToRestart =
        Stream.of(source, map1, map2, map3, sink)
            .flatMap(List::stream)
            .map(TestingSchedulingExecutionVertex::getId)
            .collect(Collectors.toSet());
    schedulingStrategy.restartTasks(verticesToRestart);
    final List<List<TestingSchedulingExecutionVertex>> expectedScheduledVertices = new ArrayList<>();
    expectedScheduledVertices.add(Arrays.asList(source.get(0), map1.get(0)));
    expectedScheduledVertices.add(Arrays.asList(source.get(1), map1.get(1)));
    expectedScheduledVertices.add(Arrays.asList(map2.get(0)));
    expectedScheduledVertices.add(Arrays.asList(map2.get(1)));
    expectedScheduledVertices.add(Arrays.asList(map3.get(0)));
    expectedScheduledVertices.add(Arrays.asList(map3.get(1)));
    assertLatestScheduledVerticesAreEqualTo(
        expectedScheduledVertices, testingSchedulerOperation);
}
/**
 * Builds the {@code ClientConfig} using the calling thread's context class loader.
 */
public ClientConfig build() {
    final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    return build(contextClassLoader);
}
/**
 * When the {@code hazelcast.client.config} system property points at an existing XML file,
 * the builder must load the configuration (here: the cluster name) from that file.
 */
@Override
@Test
public void loadingThroughSystemProperty_existingFile() throws IOException {
    String xml = HAZELCAST_CLIENT_START_TAG
        + "    <cluster-name>foobar</cluster-name>\n"
        + "</hazelcast-client>";
    File file = File.createTempFile("foo", ".xml");
    file.deleteOnExit();
    PrintWriter writer = new PrintWriter(file, StandardCharsets.UTF_8);
    writer.println(xml);
    writer.close();
    // The builder resolves this property when no explicit source is given.
    System.setProperty("hazelcast.client.config", file.getAbsolutePath());
    XmlClientConfigBuilder configBuilder = new XmlClientConfigBuilder();
    ClientConfig config = configBuilder.build();
    assertEquals("foobar", config.getClusterName());
}
/** @return the stable identifier of this table provider ("datastoreV1"). */
@Override
public String identifier() {
    return "datastoreV1";
}
@Test
public void testGetTableType() {
    // The provider must advertise the fixed "datastoreV1" identifier.
    final String actualIdentifier = provider.identifier();
    assertEquals("datastoreV1", actualIdentifier);
}
/**
 * Executes the query against this instance's default {@code jdbcTemplate},
 * mapping each row with the supplied mapper.
 */
@Override
public <R> List<R> queryMany(String sql, Object[] args, RowMapper<R> mapper) {
    // Delegate to the template-aware overload.
    return queryMany(this.jdbcTemplate, sql, args, mapper);
}
/**
 * queryMany must return the row list produced by the underlying JdbcTemplate unchanged.
 */
@Test
void testQueryMany2() {
    final String sql = "SELECT id, data_id, group_id FROM config_info WHERE id >= ? AND id <= ?";
    final Object[] args = new Object[] {1, 2};
    final List<Map<String, Object>> resultList = new ArrayList<>();
    Map<String, Object> map1 = new HashMap<>();
    map1.put("id", 1);
    map1.put("data_id", "test");
    map1.put("group_id", "test");
    final Map<String, Object> map2 = new HashMap<>();
    // FIX: these three rows were previously put into map1, leaving map2 empty and
    // overwriting map1's id with 2.
    map2.put("id", 2);
    map2.put("data_id", "test");
    map2.put("group_id", "test");
    resultList.add(map1);
    resultList.add(map2);
    when(jdbcTemplate.queryForList(sql, args)).thenReturn(resultList);
    // FIX: JUnit convention is assertEquals(expected, actual).
    assertEquals(resultList, operate.queryMany(sql, args));
}
/**
 * Returns the full application registry, optionally restricted to remote regions.
 * The response is served from the response cache, keyed by content type, Eureka accept
 * level and region list, and is gzip-encoded when the client accepts it.
 */
@GET
public Response getContainers(@PathParam("version") String version,
                              @HeaderParam(HEADER_ACCEPT) String acceptHeader,
                              @HeaderParam(HEADER_ACCEPT_ENCODING) String acceptEncoding,
                              @HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept,
                              @Context UriInfo uriInfo,
                              @Nullable @QueryParam("regions") String regionsStr) {
    boolean isRemoteRegionRequested = null != regionsStr && !regionsStr.isEmpty();
    String[] regions = null;
    if (!isRemoteRegionRequested) {
        EurekaMonitors.GET_ALL.increment();
    } else {
        regions = regionsStr.toLowerCase().split(",");
        Arrays.sort(regions); // So we don't have different caches for same regions queried in different order.
        EurekaMonitors.GET_ALL_WITH_REMOTE_REGIONS.increment();
    }

    // Check if the server allows the access to the registry. The server can
    // restrict access if it is not
    // ready to serve traffic depending on various reasons.
    if (!registry.shouldAllowAccess(isRemoteRegionRequested)) {
        return Response.status(Status.FORBIDDEN).build();
    }
    CurrentRequestVersion.set(Version.toEnum(version));
    // Default to JSON; fall back to XML when the Accept header does not ask for JSON.
    KeyType keyType = Key.KeyType.JSON;
    String returnMediaType = MediaType.APPLICATION_JSON;
    if (acceptHeader == null || !acceptHeader.contains(HEADER_JSON_VALUE)) {
        keyType = Key.KeyType.XML;
        returnMediaType = MediaType.APPLICATION_XML;
    }

    Key cacheKey = new Key(Key.EntityType.Application,
            ResponseCacheImpl.ALL_APPS,
            keyType, CurrentRequestVersion.get(), EurekaAccept.fromString(eurekaAccept), regions
    );

    Response response;
    if (acceptEncoding != null && acceptEncoding.contains(HEADER_GZIP_VALUE)) {
        response = Response.ok(responseCache.getGZIP(cacheKey))
                .header(HEADER_CONTENT_ENCODING, HEADER_GZIP_VALUE)
                .header(HEADER_CONTENT_TYPE, returnMediaType)
                .build();
    } else {
        response = Response.ok(responseCache.get(cacheKey))
                .build();
    }
    // Clear the thread-local request version to avoid leaking it to the next request.
    CurrentRequestVersion.remove();
    logger.debug("Sent registry information to client.");
    return response;
}
/**
 * Requesting the full registry with XML accept + gzip encoding must produce a response
 * declaring both the gzip Content-Encoding and the XML Content-Type headers.
 */
@Test
public void testFullAppsGetGzipXmlHeaderType() throws Exception {
    Response response = applicationsResource.getContainers(
        Version.V2.name(),
        MediaType.APPLICATION_XML,
        "gzip", // encoding
        EurekaAccept.full.name(),
        null, // uriInfo
        null // remote regions
    );
    assertThat(response.getMetadata().getFirst("Content-Encoding").toString(), is("gzip"));
    assertThat(response.getMetadata().getFirst("Content-Type").toString(), is(MediaType.APPLICATION_XML));
}
/**
 * Derives a SimpleDateFormat from the date pattern declared in the configured log4j2
 * appender's layout. Pattern tokens are the {...} groups found in the layout's "format"
 * content (e.g. "%d{yyyy-MM-dd}"); when no appender or tokens are found, a fixed
 * "yyyy/MM/dd HH:mm:ss" format is returned.
 */
protected static SimpleDateFormat getLog4j2Appender() {
    Optional<Appender> log4j2xmlAppender =
        configuration.getAppenders().values().stream()
            .filter( a -> a.getName().equalsIgnoreCase( log4J2Appender ) ).findFirst();
    if ( log4j2xmlAppender.isPresent() ) {
        ArrayList<String> matchesArray = new ArrayList<>();
        String dateFormatFromLog4j2xml = log4j2xmlAppender.get().getLayout().getContentFormat().get( "format" );
        // Collect the contents of every {...} group in the layout's format string.
        Pattern pattern = Pattern.compile( "(\\{(.*?)})" );
        Matcher matcher = pattern.matcher( dateFormatFromLog4j2xml );
        while ( matcher.find() ) {
            matchesArray.add( matcher.group( 2 ) );
        }
        if ( !matchesArray.isEmpty() ) {
            return processMatches( matchesArray );
        }
    }
    // Fallback when the appender is absent or declares no date pattern.
    return new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss" );
}
@Test public void testGetLog4j2UsingAppender5() { // Testing dd-MMM-yyyy HH:mm:ss,SSS pattern KettleLogLayout.log4J2Appender = "pdi-execution-appender-test-5"; Assert.assertEquals( "dd-MMM-yyyy HH:mm:ss,SSS", KettleLogLayout.getLog4j2Appender().toPattern() ); }
/**
 * Builds the output table schema from the SQL projection: primary/constraint keys are kept
 * only when all of their columns survive the projection, and output columns inherit
 * length/scale/nullability/comment from the input column they map to (columns produced by
 * expressions fall back to a plain physical column).
 */
@Override
protected TableSchema transformTableSchema() {
    tryOpen();
    // inputColumnsMapping[i] = name of the input column backing output field i, or null.
    List<String> inputColumnsMapping = new ArrayList<>();
    SeaTunnelRowType outRowType = sqlEngine.typeMapping(inputColumnsMapping);
    List<String> outputColumns = Arrays.asList(outRowType.getFieldNames());
    TableSchema.Builder builder = TableSchema.builder();
    // Keep the primary key only if every key column is still projected.
    if (inputCatalogTable.getTableSchema().getPrimaryKey() != null
            && outputColumns.containsAll(
                    inputCatalogTable.getTableSchema().getPrimaryKey().getColumnNames())) {
        builder.primaryKey(inputCatalogTable.getTableSchema().getPrimaryKey().copy());
    }
    // Same rule for constraint keys: drop any key with a projected-away column.
    List<ConstraintKey> outputConstraintKeys =
            inputCatalogTable.getTableSchema().getConstraintKeys().stream()
                    .filter(
                            key -> {
                                List<String> constraintColumnNames =
                                        key.getColumnNames().stream()
                                                .map(
                                                        ConstraintKey.ConstraintKeyColumn
                                                                ::getColumnName)
                                                .collect(Collectors.toList());
                                return outputColumns.containsAll(constraintColumnNames);
                            })
                    .map(ConstraintKey::copy)
                    .collect(Collectors.toList());
    builder.constraintKey(outputConstraintKeys);
    String[] fieldNames = outRowType.getFieldNames();
    SeaTunnelDataType<?>[] fieldTypes = outRowType.getFieldTypes();
    List<Column> columns = new ArrayList<>(fieldNames.length);
    for (int i = 0; i < fieldNames.length; i++) {
        // Find the input column (if any) this output field is a direct projection of.
        Column simpleColumn = null;
        String inputColumnName = inputColumnsMapping.get(i);
        if (inputColumnName != null) {
            for (Column inputColumn : inputCatalogTable.getTableSchema().getColumns()) {
                if (inputColumnName.equals(inputColumn.getName())) {
                    simpleColumn = inputColumn;
                    break;
                }
            }
        }
        Column column;
        if (simpleColumn != null) {
            // Direct projection: inherit the input column's metadata.
            column =
                    new PhysicalColumn(
                            fieldNames[i],
                            fieldTypes[i],
                            simpleColumn.getColumnLength(),
                            simpleColumn.getScale(),
                            simpleColumn.isNullable(),
                            simpleColumn.getDefaultValue(),
                            simpleColumn.getComment(),
                            simpleColumn.getSourceType(),
                            simpleColumn.getOptions());
        } else {
            // Expression-derived column: no source metadata available.
            column = PhysicalColumn.of(fieldNames[i], fieldTypes[i], 0, true, null, null);
        }
        columns.add(column);
    }
    return builder.columns(columns).build();
}
/**
 * Projected columns must keep the scale/length of their source columns, while
 * generated (expression) columns carry no scale.
 */
@Test
public void testScaleSupport() {
    SQLTransform sqlTransform = new SQLTransform(READONLY_CONFIG, getCatalogTable());
    TableSchema tableSchema = sqlTransform.transformTableSchema();
    tableSchema
        .getColumns()
        .forEach(
            column -> {
                if (column.getName().equals(TIMESTAMP_FILEDNAME)) {
                    // Timestamp column keeps its nanosecond scale of 9.
                    Assertions.assertEquals(9, column.getScale());
                } else if (column.getName().equals(GENERATE_PARTITION_KEY)) {
                    // Generated column has no source metadata, hence no scale.
                    Assertions.assertTrue(Objects.isNull(column.getScale()));
                } else {
                    Assertions.assertEquals(3, column.getColumnLength());
                }
            });
}
/**
 * Runs this extractor against the message's source field: evaluates the configured
 * condition (substring or regex), extracts result(s) into the target field(s), optionally
 * cuts the matched ranges out of the source field, and applies the converters.
 * All phases are wrapped in timers / hit-miss counters for extractor metrics.
 */
public void runExtractor(Message msg) {
    try(final Timer.Context ignored = completeTimer.time()) {
        final String field;
        try (final Timer.Context ignored2 = conditionTimer.time()) {
            // We can only work on Strings.
            if (!(msg.getField(sourceField) instanceof String)) {
                conditionMissesCounter.inc();
                return;
            }
            field = (String) msg.getField(sourceField);
            // Decide if to extract at all.
            if (conditionType.equals(ConditionType.STRING)) {
                if (field.contains(conditionValue)) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            } else if (conditionType.equals(ConditionType.REGEX)) {
                if (regexConditionPattern.matcher(field).find()) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            }
        }
        try (final Timer.Context ignored2 = executionTimer.time()) {
            Result[] results;
            try {
                results = run(field);
            } catch (ExtractorException e) {
                // Extraction failures are recorded on the message, not thrown.
                final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
                msg.addProcessingError(new Message.ProcessingError(
                        ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
                return;
            }
            if (results == null || results.length == 0
                    || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
                return;
            } else if (results.length == 1 && results[0].target == null) {
                // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
                msg.addField(targetField, results[0].getValue());
            } else {
                for (final Result result : results) {
                    msg.addField(result.getTarget(), result.getValue());
                }
            }
            // Remove original from message?
            // CUT strategy: never cut reserved fields or the field we just wrote into.
            if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField)
                    && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
                final StringBuilder sb = new StringBuilder(field);
                final List<Result> reverseList = Arrays.stream(results)
                        .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
                        .collect(Collectors.toList());
                // remove all from reverse so that the indices still match
                for (final Result result : reverseList) {
                    sb.delete(result.getBeginIndex(), result.getEndIndex());
                }
                final String builtString = sb.toString();
                final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
                msg.removeField(sourceField);
                // TODO don't add an empty field back, or rather don't add fullyCutByExtractor
                msg.addField(sourceField, finalResult);
            }
            runConverters(msg);
        }
    }
}
@Test public void testWithStringCondition() throws Exception { final TestExtractor extractor = new TestExtractor.Builder() .conditionType(STRING) .conditionValue("hello") .build(); // Extractor runs if the message contains the condition value "hello". final Message msg1 = createMessage("hello world"); extractor.runExtractor(msg1); assertThat(msg1.hasField("target")).isTrue(); // Extractor does not run if the message does not contain the condition value. final Message msg2 = createMessage("the message"); extractor.runExtractor(msg2); assertThat(msg2.hasField("target")).isFalse(); }
/** Creates the endpoint provider that commits the pushed blob at {@code location}. */
RegistryEndpointProvider<Void> committer(URL location) {
    final Committer commitEndpoint = new Committer(location);
    return commitEndpoint;
}
/** The commit endpoint returns no payload, so handleResponse must yield null. */
@Test
public void testCommitter_handleResponse() throws IOException, RegistryException {
    Assert.assertNull(
        testBlobPusher.committer(mockUrl).handleResponse(Mockito.mock(Response.class)));
}
/**
 * Creates a {@code CouchbaseEndpoint} for the given URI and binds the remaining
 * URI parameters onto the endpoint's setters.
 */
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters)
        throws Exception {
    final CouchbaseEndpoint couchbaseEndpoint = new CouchbaseEndpoint(uri, remaining, this);
    setProperties(couchbaseEndpoint, parameters);
    return couchbaseEndpoint;
}
/**
 * All endpoint options passed as URI parameters must be bound onto the created endpoint,
 * and protocol/host/port must be parsed from the remaining part of the URI.
 */
@Test
public void testPropertiesSet() throws Exception {
    Map<String, Object> params = new HashMap<>();
    params.put("username", "ugol");
    params.put("password", "pwd");
    params.put("additionalHosts", "127.0.0.1,example.com,another-host");
    params.put("persistTo", 2);
    params.put("replicateTo", 3);
    params.put("bucket", "bucket");

    // FIX: the scheme previously read "couchdb:" (copy-paste from the CouchDB component);
    // this is the Couchbase component, so use its scheme.
    String uri = "couchbase:http://localhost:91234";
    String remaining = "http://localhost:91234";

    CouchbaseEndpoint endpoint = component.createEndpoint(uri, remaining, params);
    assertEquals("http", endpoint.getProtocol());
    assertEquals("localhost", endpoint.getHostname());
    assertEquals(91234, endpoint.getPort());
    assertEquals("ugol", endpoint.getUsername());
    assertEquals("pwd", endpoint.getPassword());
    assertEquals("127.0.0.1,example.com,another-host", endpoint.getAdditionalHosts());
    assertEquals(2, endpoint.getPersistTo());
    assertEquals(3, endpoint.getReplicateTo());
}
/** Returns the local socket address of the wrapped transport client. */
@Override
public InetSocketAddress getLocalAddress() {
    // Pure delegation; this wrapper adds no address logic of its own.
    return this.client.getLocalAddress();
}
@Test
void test_share_connect() {
    init(0, 1);
    // Both service proxies must share the very same underlying connection,
    // hence identical local addresses and equal client instances.
    Assertions.assertEquals(helloClient.getLocalAddress(), demoClient.getLocalAddress());
    Assertions.assertEquals(helloClient, demoClient);
    destroy();
}
/**
 * Validates a project ID: length must lie within the allowed bounds and the value may
 * contain only the permitted characters.
 *
 * @param idToCheck the candidate project ID
 * @throws IllegalArgumentException if the ID is too short, too long, or contains
 *         characters outside the allowed set
 */
public static void checkValidProjectId(String idToCheck) {
    final int length = idToCheck.length();
    if (length < MIN_PROJECT_ID_LENGTH) {
        throw new IllegalArgumentException("Project ID " + idToCheck + " cannot be empty.");
    }
    if (length > MAX_PROJECT_ID_LENGTH) {
        throw new IllegalArgumentException(
            "Project ID "
                + idToCheck
                + " cannot be longer than "
                + MAX_PROJECT_ID_LENGTH
                + " characters.");
    }
    if (ILLEGAL_PROJECT_CHARS.matcher(idToCheck).find()) {
        throw new IllegalArgumentException(
            "Project ID "
                + idToCheck
                + " is not a valid ID. Only letters, numbers, hyphens, single quotes, colon, dot and"
                + " exclamation points are allowed.");
    }
}
@Test
public void testCheckValidProjectIdWhenIdIsTooShort() {
    // "abc" is below the minimum project-id length and must be rejected.
    final String tooShortId = "abc";
    assertThrows(IllegalArgumentException.class, () -> checkValidProjectId(tooShortId));
}
/**
 * Authenticates a backup user from a zero-knowledge credential presentation plus a signature
 * over it. The stored public key for the backup-id is fetched and used to verify the
 * signature; when no key is stored, a deliberately invalid key is substituted so that
 * verification fails (recording a "missing_public_key" failure metric) without revealing
 * whether the backup-id exists.
 */
public CompletableFuture<AuthenticatedBackupUser> authenticateBackupUser(
    final BackupAuthCredentialPresentation presentation,
    final byte[] signature) {
    final PresentationSignatureVerifier signatureVerifier = verifyPresentation(presentation);
    return backupsDb
        .retrieveAuthenticationData(presentation.getBackupId())
        .thenApply(optionalAuthenticationData -> {
            final BackupsDb.AuthenticationData authenticationData = optionalAuthenticationData
                .orElseGet(() -> {
                    Metrics.counter(ZK_AUTHN_COUNTER_NAME,
                            SUCCESS_TAG_NAME, String.valueOf(false),
                            FAILURE_REASON_TAG_NAME, "missing_public_key")
                        .increment();
                    // There was no stored public key, use a bunk public key so that validation will fail
                    return new BackupsDb.AuthenticationData(INVALID_PUBLIC_KEY, null, null);
                });
            return new AuthenticatedBackupUser(
                presentation.getBackupId(),
                signatureVerifier.verifySignature(signature, authenticationData.publicKey()),
                authenticationData.backupDir(), authenticationData.mediaDir());
        })
        .thenApply(result -> {
            // Reached only when signature verification did not throw.
            Metrics.counter(ZK_AUTHN_COUNTER_NAME, SUCCESS_TAG_NAME, String.valueOf(true)).increment();
            return result;
        });
}
/**
 * A presentation produced with the wrong server parameters must be rejected with
 * UNAUTHENTICATED before any stored public key is consulted.
 */
@Test
public void invalidPresentationNoPublicKey() throws VerificationFailedException {
    // Built against freshly generated (i.e. wrong) server secret params.
    final BackupAuthCredentialPresentation invalidPresentation = backupAuthTestUtil.getPresentation(
        GenericServerSecretParams.generate(),
        BackupLevel.MESSAGES, backupKey, aci);
    final ECKeyPair keyPair = Curve.generateKeyPair();
    // haven't set a public key yet, but should fail before hitting the database anyway
    assertThatExceptionOfType(StatusRuntimeException.class)
        .isThrownBy(() -> backupManager.authenticateBackupUser(
            invalidPresentation,
            keyPair.getPrivateKey().calculateSignature(invalidPresentation.serialize())))
        .extracting(StatusRuntimeException::getStatus)
        .extracting(Status::getCode)
        .isEqualTo(Status.UNAUTHENTICATED.getCode());
}
/**
 * Converts an arbitrary exception into a SQLException suitable for the given database type.
 * The instanceof checks are ordered from most to least specific; reordering them would
 * change which mapping wins for exceptions matching several branches.
 *
 * @param cause the exception to convert
 * @param databaseType target database dialect used to resolve a dialect-specific mapper
 * @return the mapped SQLException; unknown causes are wrapped as UnknownSQLException
 */
public static SQLException toSQLException(final Exception cause, final DatabaseType databaseType) {
    if (cause instanceof SQLException) {
        return (SQLException) cause;
    }
    if (cause instanceof ShardingSphereSQLException) {
        return ((ShardingSphereSQLException) cause).toSQLException();
    }
    if (cause instanceof SQLDialectException) {
        if (cause instanceof DatabaseProtocolException) {
            return new DatabaseProtocolSQLException(cause.getMessage()).toSQLException();
        }
        // Dialect-specific mapping is optional; fall through to the generic cases when absent.
        Optional<SQLDialectExceptionMapper> dialectExceptionMapper =
            DatabaseTypedSPILoader.findService(SQLDialectExceptionMapper.class, databaseType);
        if (dialectExceptionMapper.isPresent()) {
            return dialectExceptionMapper.get().convert((SQLDialectException) cause);
        }
    }
    if (cause instanceof ShardingSphereServerException) {
        return new ServerSQLException(cause).toSQLException();
    }
    return new UnknownSQLException(cause).toSQLException();
}
@Test
void assertToSQLExceptionWithDatabaseProtocolException() {
    final DatabaseProtocolException cause = mock(DatabaseProtocolException.class);
    when(cause.getMessage()).thenReturn("No reason");
    final SQLException actual = SQLExceptionTransformEngine.toSQLException(cause, databaseType);
    // Protocol violations map to the generic vendor state HY000 with error code 30002,
    // and the original message is carried over with the protocol-exception prefix.
    assertThat(actual.getSQLState(), is("HY000"));
    assertThat(actual.getErrorCode(), is(30002));
    assertThat(actual.getMessage(), is("Database protocol exception: No reason"));
}
/**
 * Replaces the stored AWS plugin configuration with the submitted values.
 * <p>
 * The secret key is optional in the update payload: when absent, the previously
 * stored (encrypted) secret key is kept; when present, it is re-encrypted with
 * the system password secret before being written.
 */
@PUT
@ApiOperation(value = "Updates the AWS default configuration.")
@RequiresPermissions({RestPermissions.CLUSTER_CONFIG_ENTRY_CREATE, RestPermissions.CLUSTER_CONFIG_ENTRY_EDIT})
@AuditEvent(type = AuditEventTypes.CLUSTER_CONFIGURATION_UPDATE)
public Response updateConfig(@Valid AWSPluginConfigurationUpdate update) {
    // Start from what is currently stored, falling back to defaults on first use.
    final AWSPluginConfiguration current = clusterConfigService.getOrDefault(
        AWSPluginConfiguration.class,
        AWSPluginConfiguration.createDefault()
    );
    // Copy the plainly-updatable fields onto a builder derived from the current state.
    final AWSPluginConfiguration.Builder builder = current.toBuilder()
        .lookupsEnabled(update.lookupsEnabled())
        .lookupRegions(update.lookupRegions())
        .accessKey(update.accessKey())
        .proxyEnabled(update.proxyEnabled());
    // Only overwrite the secret key when the client actually supplied one.
    final AWSPluginConfiguration saved = update.secretKey()
        .map(key -> builder.secretKey(key, systemConfiguration.getPasswordSecret()))
        .orElse(builder)
        .build();
    clusterConfigService.write(saved);
    return Response.accepted(saved).build();
}
// When only the default (empty) configuration exists, an update must persist every
// submitted field — including the secret key, which is stored encrypted and read
// back here with the password secret.
@Test
public void updatesPreviouslyMissingConfig() {
    mockPreviousConfig(AWSPluginConfiguration.createDefault());
    final AWSPluginConfigurationUpdate update = AWSPluginConfigurationUpdate.create(
        true,
        "lookupRegions",
        "myAccessKey",
        "aNewSecretKey",
        true
    );
    this.awsConfigurationResource.updateConfig(update);
    final AWSPluginConfiguration writtenConfig = captureWrittenConfig();
    assertThat(writtenConfig.lookupsEnabled()).isTrue();
    assertThat(writtenConfig.lookupRegions()).isEqualTo("lookupRegions");
    assertThat(writtenConfig.accessKey()).isEqualTo("myAccessKey");
    assertThat(writtenConfig.proxyEnabled()).isTrue();
    // Decrypt with the password secret used by the resource under test.
    assertThat(writtenConfig.secretKey("verySecret123456")).isEqualTo("aNewSecretKey");
}
/**
 * NFS WRITE entry point: unpacks the RPC envelope (credentials, transaction id,
 * channel, peer address) and delegates to the internal write overload.
 */
@Override
public WRITE3Response write(XDR xdr, RpcInfo info) {
    final SecurityHandler securityHandler = getSecurityHandler(info);
    final RpcCall rpcCall = (RpcCall) info.header();
    return write(xdr, info.channel(), rpcCall.getXid(), securityHandler, info.remoteAddress());
}
// Exercises the WRITE3 handler end-to-end against a mini NFS gateway: an
// unprivileged caller is rejected with NFS3ERR_ACCES, while a privileged caller's
// DATA_SYNC write is accepted (null response here means the reply is sent
// asynchronously rather than returned).
@Test(timeout = 60000)
public void testWrite() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
    long dirId = status.getFileId();
    int namenodeId = Nfs3Utils.getNamenodeId(config);
    FileHandle handle = new FileHandle(dirId, namenodeId);

    // 10-byte payload 0..9.
    byte[] buffer = new byte[10];
    for (int i = 0; i < 10; i++) {
        buffer[i] = (byte) i;
    }

    WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
    XDR xdr_req = new XDR();
    writeReq.serialize(xdr_req);

    // Attempt by an unpriviledged user should fail.
    WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());

    // Attempt by a priviledged user should pass.
    WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect response:", null, response2);
}
/**
 * Returns every namespace instance associated with the given public app namespace,
 * excluding child namespaces (presumably branch/gray-release copies — see
 * {@code filterChildNamespace}).
 *
 * @param namespaceName name of the public app namespace
 * @param page paging parameters for the namespace query
 * @return matching namespaces with child namespaces filtered out
 * @throws BadRequestException if no public app namespace with that name exists
 */
public List<Namespace> findPublicAppNamespaceAllNamespaces(String namespaceName, Pageable page) {
    // Guard: the name must refer to an existing public app namespace.
    if (appNamespaceService.findPublicNamespaceByName(namespaceName) == null) {
        throw new BadRequestException(
            String.format("Public appNamespace not exists. NamespaceName = %s", namespaceName));
    }
    return filterChildNamespace(namespaceRepository.findByNamespaceName(namespaceName, page));
}
// Looking up namespaces for a public app namespace that does not exist must be
// rejected with a BadRequestException (simulated by the mock returning null).
@Test(expected = BadRequestException.class)
public void testFindPublicAppNamespaceWithWrongNamespace() {
    Pageable page = PageRequest.of(0, 10);
    when(appNamespaceService.findPublicNamespaceByName(testPublicAppNamespace)).thenReturn(null);
    namespaceService.findPublicAppNamespaceAllNamespaces(testPublicAppNamespace, page);
}
public static boolean isLocalElasticsearchEnabled(AppSettings settings) { // elasticsearch is enabled on "search" nodes, but disabled on "application" nodes if (isClusterEnabled(settings.getProps())) { return NodeType.parse(settings.getValue(CLUSTER_NODE_TYPE.getKey()).orElse("")) == NodeType.SEARCH; } // elasticsearch is enabled in standalone mode return true; }
// Bug fix: the test name claimed "returns_true" while the assertion (correctly, per
// the production code: ES is disabled on application nodes in cluster mode) checks
// isFalse(). Renamed the method so the name matches the asserted behavior.
@Test
@UseDataProvider("validIPv4andIPv6Addresses")
public void isLocalElasticsearchEnabled_returns_false_for_a_application_node(String host) {
    TestAppSettings settings = newSettingsForAppNode(host);
    assertThat(ClusterSettings.isLocalElasticsearchEnabled(settings)).isFalse();
}
// Builds a matcher accepting requests whose URI is exactly {@code context} or any URI
// nested under it (URLs.join appends a path separator, so a URI that merely shares the
// prefix — e.g. "targetshello" for context "targets" — does not match).
public static RequestMatcher context(final String context) {
    return or(by(uri(context)), match(uri(URLs.join(context, ".*"))));
}
// URIs that are unrelated to the context, or that merely share its character prefix
// without a path separator ("targetshello"), must not match.
@Test
public void should_not_match_mismatch_uri() {
    RequestMatcher matcher = InternalApis.context("targets");
    assertThat(matcher.match(requestByUri("something")), is(false));
    assertThat(matcher.match(requestByUri("targetshello")), is(false));
}
/**
 * Binds a lock expression from its AST form into a {@code GuardedByExpression},
 * failing the guarded-by analysis if the expression cannot be resolved.
 */
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
    GuardedByExpression expr = BINDER.visit(exp, context);
    // Null means the binder could not resolve the expression in this context.
    checkGuardedBy(expr != null, String.valueOf(exp));
    // A bare type literal (e.g. `Foo`) is not a valid lock expression.
    checkGuardedBy(expr.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
    return expr;
}
// A guard expression naming a field ("Other") must bind to the FIELD of the enclosing
// class, not to the class of the same name — field resolution wins over type literals.
@Test
public void simpleFieldName() {
    assertThat(
        bind(
            "Test",
            "Other",
            forSourceLines(
                "threadsafety/Test.java",
                "package threadsafety;",
                "class Other {",
                "  static final Object lock = new Object();",
                "}",
                "class Test {",
                "  final Object Other = null;",
                "}")))
        .isEqualTo("(SELECT (THIS) Other)");
}
@ExecuteOn(TaskExecutors.IO)
@Post
@Operation(tags = {"Templates"}, summary = "Create a template")
public HttpResponse<Template> create(
    @Parameter(description = "The template") @Valid @Body Template template
) throws ConstraintViolationException {
    // Template ids must be unique within their namespace for the resolved tenant;
    // duplicates are reported as a constraint violation rather than overwritten.
    boolean alreadyExists = templateRepository
        .findById(tenantService.resolveTenant(), template.getNamespace(), template.getId())
        .isPresent();
    if (alreadyExists) {
        throw new ConstraintViolationException(Collections.singleton(ManualConstraintViolation.of(
            "Template id already exists",
            template,
            Template.class,
            "template.id",
            template.getId()
        )));
    }
    return HttpResponse.ok(templateRepository.create(template));
}
// Creating a template: the id must not exist beforehand (GET returns 404), the POST
// must succeed, and the created template must be retrievable with its fields intact.
@Test
void create() {
    Template template = createTemplate();

    // Precondition: the template does not exist yet.
    HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> {
        client.toBlocking().retrieve(HttpRequest.GET("/api/v1/templates/io.kestra.tests/" + template.getId()));
    });
    assertThat(e.getStatus(), is(HttpStatus.NOT_FOUND));

    Template result = client.toBlocking().retrieve(POST("/api/v1/templates", template), Template.class);

    // Read it back and verify the persisted fields.
    Template createdTemplate = client.toBlocking().retrieve(HttpRequest.GET("/api/v1/templates/" + template.getNamespace() + "/" + template.getId()), Template.class);
    assertThat(createdTemplate.getId(), is(template.getId()));
    assertThat(createdTemplate.getDescription(), is("My template description"));
}
/** Returns the port this server is bound to. */
int getPort() {
    return port;
}
// Registers an MBean on the platform MBean server, connects to it through the
// JMXService-managed RMI endpoint, reads an attribute remotely, then verifies that
// unregistering makes remote access fail with InstanceNotFoundException.
@Test
void testJMXServiceRegisterMBean() throws Exception {
    TestObject testObject = new TestObject();
    ObjectName testObjectName = new ObjectName("org.apache.flink.management", "key", "value");
    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
    try {
        Optional<JMXServer> server = JMXService.getInstance();
        assertThat(server).isPresent();
        mBeanServer.registerMBean(testObject, testObjectName);

        // Connect over RMI using the port the JMX server actually bound.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi://localhost:"
                + server.get().getPort()
                + "/jndi/rmi://localhost:"
                + server.get().getPort()
                + "/jmxrmi");
        JMXConnector jmxConn = JMXConnectorFactory.connect(url);
        MBeanServerConnection mbeanConnConn = jmxConn.getMBeanServerConnection();

        assertThat((int) mbeanConnConn.getAttribute(testObjectName, "Foo")).isOne();

        // After unregistering, the attribute must no longer be reachable.
        mBeanServer.unregisterMBean(testObjectName);
        assertThatThrownBy(() -> mbeanConnConn.getAttribute(testObjectName, "Foo"))
            .isInstanceOf(InstanceNotFoundException.class);
    } finally {
        JMXService.stopInstance();
    }
}
/**
 * Schedules the recurring job to run repeatedly at the given fixed interval.
 *
 * @param duration interval between consecutive runs
 * @return this builder, for chaining
 * @throws IllegalArgumentException if a schedule was already set on this builder
 */
public RecurringJobBuilder withDuration(Duration duration) {
    // A builder accepts exactly one schedule; reject a second one explicitly.
    if (this.schedule != null) {
        throw new IllegalArgumentException("A schedule has already been provided.");
    }
    this.schedule = new Interval(duration);
    return this;
}
// A duration-based schedule must produce an id and the expected interval schedule
// expression (duration1Minute) on the built recurring job.
@Test
void testWithDuration() {
    RecurringJob recurringJob = aRecurringJob()
        .withDuration(Duration.ofMinutes(1))
        .withDetails(() -> testService.doWork())
        .build(jobDetailsGenerator);
    assertThat(recurringJob)
        .hasId()
        .hasScheduleExpression(duration1Minute);
}
@Override
public void rollbackMigration(PartitionMigrationEvent event) {
    // Only the destination side received data during the aborted migration, so only
    // it needs cleanup; per the helper's name this clears ringbuffers whose backup
    // count is below the replica index being rolled back — TODO confirm semantics.
    if (event.getMigrationEndpoint() == DESTINATION) {
        clearRingbuffersHavingLesserBackupCountThan(event.getPartitionId(), event.getCurrentReplicaIndex());
    }
}
// Rolling back a destination-side migration (currentReplicaIndex -1) must discard the
// ringbuffer container that was created for the partition.
@Test
public void rollbackMigration() {
    Ringbuffer ringbuffer = hz.getRingbuffer("foo");
    int partitionId = getPartitionId(hz, ringbuffer.getName());
    PartitionMigrationEvent partitionEvent = new PartitionMigrationEvent(DESTINATION, partitionId, -1, 0, UUID.randomUUID());
    service.rollbackMigration(partitionEvent);
    assertEquals(0, service.getContainers().size());
}
/**
 * Parses a natural-language date expression relative to "now".
 * Convenience overload delegating to {@code parse(String, Date)} with the current time
 * as the reference instant (java.util.Date is kept for API compatibility).
 *
 * @throws DateNotParsableException if the string cannot be interpreted as a date
 */
public Result parse(final String string) throws DateNotParsableException {
    return this.parse(string, new Date());
}
// "last week" with a US locale and a fixed reference date (Sat 12.06.2021) must span
// the previous US week: Sunday 30.05 00:00 (inclusive) to Sunday 06.06 00:00.
@Test
public void testParseLastWeekUS() throws Exception {
    final NaturalDateParser naturalDateParser = new NaturalDateParser(Locale.US);
    DateTime reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 09:45:23");
    NaturalDateParser.Result result = naturalDateParser.parse("last week", reference.toDate());
    DateTime lastMonday = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("30.05.2021 00:00:00");
    DateTime nextMonday = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("06.06.2021 00:00:00");
    assertThat(result.getFrom()).as("should be equal to").isEqualTo(lastMonday);
    assertThat(result.getTo()).as("should be equal to").isEqualTo(nextMonday);
}
/**
 * Splits a raw command-line string into individual arguments.
 * <p>
 * Arguments are separated by whitespace. A token that starts with a single or
 * double quote consumes following tokens until one ends with the same quote
 * character; the surrounding quotes are stripped from the result.
 * <p>
 * Fixes over the previous version: no longer throws
 * {@code StringIndexOutOfBoundsException} on a lone-quote token or on empty
 * tokens produced by leading whitespace, no longer silently drops the last
 * character of an unterminated quoted value, and uses a {@code StringBuilder}
 * instead of string concatenation in the joining loop.
 *
 * @param args raw argument string; may be {@code null} or empty
 * @return parsed arguments in order, never {@code null}
 */
public static List<String> extractArgs(String args) {
    List<String> programArgs = new ArrayList<>();
    if (args == null || args.isEmpty()) {
        return programArgs;
    }
    Iterator<String> iter = Arrays.asList(args.split("\\s+")).iterator();
    while (iter.hasNext()) {
        String token = iter.next();
        // Leading whitespace yields an empty first token from split(); skip it.
        if (token.isEmpty()) {
            continue;
        }
        char quote = token.charAt(0);
        if (quote == '\'' || quote == '"') {
            // Re-join tokens that were split apart inside a quoted value.
            StringBuilder joined = new StringBuilder(token);
            while ((joined.length() < 2 || joined.charAt(joined.length() - 1) != quote) && iter.hasNext()) {
                joined.append(' ').append(iter.next());
            }
            // Strip the opening quote, and the closing quote only if it is present
            // (an unterminated quoted value keeps its full content).
            String value = joined.toString();
            int end = value.length() > 1 && value.charAt(value.length() - 1) == quote
                ? value.length() - 1
                : value.length();
            programArgs.add(value.substring(1, end));
        } else {
            programArgs.add(token);
        }
    }
    return programArgs;
}
// A realistic merge_into command line with quoted values containing spaces, operators
// and nested single quotes must be split into the expected argument list (RESULT1).
@Test
void extractArgs() {
    List<String> args1 = ExecuteJarOperation.extractArgs(
        "merge_into --warehouse hdfs:///tmp/paimon --database default --table T --source_table S --on \"T.id = S.order_id\" --merge_actions matched-upsert,matched-delete --matched_upsert_condition \"T.price > 100\" --matched_upsert_set \"mark = 'important'\" --matched_delete_condition \"T.price < 10\"");
    Assert.assertArrayEquals(args1.toArray(new String[0]), RESULT1.toArray(new String[0]));
}
/**
 * Lists the users authorized for the given alert group.
 * Failures are mapped to the AUTHORIZED_USER_ERROR status payload (the broad catch is
 * the controller-layer convention here; @ApiException covers the declared error).
 */
@Operation(summary = "authorizedUser", description = "AUTHORIZED_USER_NOTES")
@Parameters({
    @Parameter(name = "alertgroupId", description = "ALERT_GROUP_ID", required = true, schema = @Schema(implementation = String.class))
})
@GetMapping(value = "/authed-user")
@ResponseStatus(HttpStatus.OK)
@ApiException(AUTHORIZED_USER_ERROR)
public Result authorizedUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam("alertgroupId") Integer alertgroupId) {
    try {
        Map<String, Object> result = usersService.authorizedUser(loginUser, alertgroupId);
        return returnDataList(result);
    } catch (Exception e) {
        log.error(Status.AUTHORIZED_USER_ERROR.getMsg(), e);
        return error(Status.AUTHORIZED_USER_ERROR.getCode(), Status.AUTHORIZED_USER_ERROR.getMsg());
    }
}
// NOTE(review): test is @Disabled with no stated reason — presumably flaky or broken
// in CI; confirm and either fix or document before re-enabling.
@Disabled
@Test
public void testAuthorizedUser() throws Exception {
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("alertgroupId", "1");
    MvcResult mvcResult = mockMvc.perform(get("/users/authed-user")
            .header(SESSION_ID, sessionId)
            .params(paramsMap))
        .andExpect(status().isOk())
        .andExpect(content().contentType(MediaType.APPLICATION_JSON))
        .andReturn();
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
    logger.info(mvcResult.getResponse().getContentAsString());
}
/**
 * Renders a Redis command and its parameters for logging, masking the parameters
 * of AUTH commands so credentials never reach the logs.
 */
public static String toString(RedisCommand<?> command, Object... params) {
    final String renderedParams = RedisCommands.AUTH.equals(command)
        ? "(password masked)"
        : LogHelper.toString(params);
    return "command: " + command + ", params: " + renderedParams;
}
// Small nested collections of every common element type must be rendered fully,
// as nested bracketed lists, without truncation.
@Test
public void toStringWithNestedSmallCollections() {
    List<String> strings = Arrays.asList("0");
    List<Integer> ints = Arrays.asList(1);
    List<Long> longs = Arrays.asList(2L);
    List<Double> doubles = Arrays.asList(3.1D);
    List<Float> floats = Arrays.asList(4.2F);
    List<Byte> bytes = Arrays.asList((byte) 5);
    List<Character> chars = Arrays.asList('6');
    Object[] input = new Object[] { strings, ints, longs, doubles, floats, bytes, chars };
    assertThat(LogHelper.toString(input)).isEqualTo("[[0], [1], [2], [3.1], [4.2], [5], [6]]");
}