focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Builds an {@link AdAuthentication} snapshot from the AD session stored under
 * the given HTTP session id.
 *
 * @param httpSessionId identifier of the HTTP session whose AD session is looked up
 * @return a new authentication result populated from the session state
 * @throws AdException when no session can be resolved for the given id
 */
public AdAuthentication resolveAuthenticationResult(String httpSessionId) throws AdException {
    final AdSession session = getAdSession(httpSessionId);
    final AdAuthentication authentication = new AdAuthentication();
    // Copy the authentication outcome first, then the identity attributes.
    authentication.setLevel(session.getAuthenticationLevel());
    authentication.setStatus(session.getAuthenticationStatus());
    authentication.setEntityId(session.getEntityId());
    authentication.setEncryptionIdType(session.getEncryptionIdType());
    authentication.setBsn(session.getBsn());
    authentication.setPolymorphIdentity(session.getPolymorphIdentity());
    authentication.setPolymorphPseudonym(session.getPolymorphPseudonym());
    return authentication;
}
/**
 * Verifies that resolving an authentication result for an unknown HTTP session id
 * fails with an {@link AdException} carrying the expected message.
 */
@Test
public void resolveAuthenticationResultFailed() {
    Exception result = assertThrows(AdException.class,
            () -> adService.resolveAuthenticationResult("httpSessionId"));
    // JUnit convention is assertEquals(expected, actual); the original call had the
    // arguments swapped, which produces a misleading message on failure.
    assertEquals("no adSession found", result.getMessage());
}
// Convenience overload: delegates to the full build(...) variant, supplying a
// fresh AggregateParamsFactory as the default aggregate-parameter factory.
public static KTableHolder<GenericKey> build( final KGroupedTableHolder groupedTable, final TableAggregate aggregate, final RuntimeBuildContext buildContext, final MaterializedFactory materializedFactory) { return build( groupedTable, aggregate, buildContext, materializedFactory, new AggregateParamsFactory() ); }
// Verifies that building the aggregate step exposes a materialization whose
// state-store name, schemas and single mapper transform match expectations.
@Test public void shouldBuildMaterializationCorrectlyForAggregate() {
// When:
final KTableHolder<?> result = aggregate.build(planBuilder, planInfo);
// Then:
assertThat(result.getMaterializationBuilder().isPresent(), is(true)); final MaterializationInfo info = result.getMaterializationBuilder().get().build(); assertThat(info.stateStoreName(), equalTo("agg-regate-Materialize")); assertThat(info.getSchema(), equalTo(AGGREGATE_SCHEMA)); assertThat(info.getStateStoreSchema(), equalTo(AGGREGATE_SCHEMA)); assertThat(info.getTransforms(), hasSize(1)); final MapperInfo aggMapInfo = (MapperInfo) info.getTransforms().get(0); final KsqlTransformer<Object, GenericRow> mapper = aggMapInfo.getMapper(name -> null);
// Given:
final GenericKey key = mock(GenericKey.class); final GenericRow value = mock(GenericRow.class);
// When:
mapper.transform(key, value, ctx);
// Then:
verify(resultMapper).transform(key, value, ctx); }
// Supplies the DistCp-specific committer for this output format: commit/abort
// handling for the task's output path is delegated to CopyCommitter.
@Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException { return new CopyCommitter(getOutputPath(context), context); }
// Verifies that CopyOutputFormat hands back a CopyCommitter for a minimally
// configured task attempt context; an IOException is treated as a test failure.
@Test public void testGetOutputCommitter() { try { TaskAttemptContext context = new TaskAttemptContextImpl(new Configuration(), new TaskAttemptID("200707121733", 1, TaskType.MAP, 1, 1)); context.getConfiguration().set("mapred.output.dir", "/out"); Assert.assertTrue(new CopyOutputFormat().getOutputCommitter(context) instanceof CopyCommitter); } catch (IOException e) { LOG.error("Exception encountered ", e); Assert.fail("Unable to get output committer"); } }
/**
 * Authenticates against the hubiC service using OAuth credentials obtained from
 * the authorization service, mapping transport failures to BackgroundException.
 *
 * @param prompt login callback (unused; authentication is token based)
 * @param cancel cancel callback (unused)
 * @throws BackgroundException when the Swift client or the HTTP transport fails
 */
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    final Credentials credentials = authorizationService.validate();
    try {
        if(log.isInfoEnabled()) {
            log.info(String.format("Attempt authentication with %s", credentials.getOauth()));
        }
        final HubicAuthenticationRequest request
                = new HubicAuthenticationRequest(credentials.getOauth().getAccessToken());
        client.authenticate(request, new HubicAuthenticationResponseHandler());
    }
    catch(GenericException e) {
        // Translate Swift protocol errors into the generic exception hierarchy.
        throw new SwiftExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
// Integration test: logging in with an invalid OAuth refresh token must first
// surface a LoginFailureException with the expected detail, which is rethrown
// and expected to propagate as LoginCanceledException.
@Test(expected = LoginCanceledException.class) public void testConnectInvalidRefreshToken() throws Exception { final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new HubicProtocol()))); final Profile profile = new ProfilePlistReader(factory).read( this.getClass().getResourceAsStream("/hubiC.cyberduckprofile")); final HubicSession session = new HubicSession(new Host(profile, new HubicProtocol().getDefaultHostname(), new Credentials("u@domain")), new DisabledX509TrustManager(), new DefaultX509KeyManager()); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); try { session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); } catch(LoginFailureException e) { assertEquals("Invalid refresh token. Please contact your web hosting service provider for assistance.", e.getDetail()); throw e; } session.close(); }
@SuppressWarnings("PMD.UndefineMagicConstantRule") public static Member singleParse(String member) { // Nacos default port is 8848 int defaultPort = EnvUtil.getProperty(SERVER_PORT_PROPERTY, Integer.class, DEFAULT_SERVER_PORT); // Set the default Raft port information for securit String address = member; int port = defaultPort; String[] info = InternetAddressUtil.splitIPPortStr(address); if (info.length > 1) { address = info[0]; port = Integer.parseInt(info[1]); } Member target = Member.builder().ip(address).port(port).state(NodeState.UP).build(); Map<String, Object> extendInfo = new HashMap<>(4); // The Raft Port information needs to be set by default extendInfo.put(MemberMetaDataConstants.RAFT_PORT, String.valueOf(calculateRaftPort(target))); extendInfo.put(MemberMetaDataConstants.READY_TO_UPGRADE, true); target.setExtendInfo(extendInfo); // use grpc to report default target.setGrpcReportEnabled(true); return target; }
// Verifies that parsing an address without an explicit port falls back to the
// default server port and seeds default raft-port / upgrade-readiness metadata.
@Test void testSingleParseWithoutPort() { Member actual = MemberUtil.singleParse(IP); assertEquals(IP, actual.getIp()); assertEquals(PORT, actual.getPort()); assertEquals(IP + ":" + PORT, actual.getAddress()); assertEquals(NodeState.UP, actual.getState()); assertTrue((Boolean) actual.getExtendVal(MemberMetaDataConstants.READY_TO_UPGRADE)); assertEquals("7848", actual.getExtendVal(MemberMetaDataConstants.RAFT_PORT)); assertFalse(actual.getAbilities().getRemoteAbility().isSupportRemoteConnection()); }
/**
 * Merges two configurations; values from {@code config} take precedence over
 * {@code fallback} for keys present in both.
 *
 * @param config   the primary configuration
 * @param fallback the configuration supplying defaults for keys absent in {@code config}
 * @return a new configuration backed by the merged root object
 */
public static Config merge(Config config, Config fallback) {
    var primaryRoot = config.root();
    var fallbackRoot = fallback.root();
    // The merged origin records the provenance of both inputs.
    var mergedOrigin = new ContainerConfigOrigin(config.origin(), fallback.origin());
    var mergedRoot = mergeObjects(mergedOrigin, ConfigValuePath.root(), primaryRoot, fallbackRoot);
    return new SimpleConfig(mergedOrigin, mergedRoot);
}
// Verifies that merging two flat configs exposes the keys of both roots with
// their original string values.
@Test void testMergeRoots() { var config1 = MapConfigFactory.fromMap(Map.of( "field1", "value1" )); var config2 = MapConfigFactory.fromMap(Map.of( "field2", "value2" )); var config = MergeConfigFactory.merge(config1, config2); assertThat(config.get(ConfigValuePath.root().child("field1"))) .isInstanceOf(ConfigValue.StringValue.class) .hasFieldOrPropertyWithValue("value", "value1"); assertThat(config.get(ConfigValuePath.root().child("field2"))) .isInstanceOf(ConfigValue.StringValue.class) .hasFieldOrPropertyWithValue("value", "value2"); }
// Decorates the inner non-windowed table with KsqlMaterializedTable so lookups
// flow through KSQL's transform pipeline.
@Override public MaterializedTable nonWindowed() { return new KsqlMaterializedTable(inner.nonWindowed()); }
// Verifies that a non-windowed table lookup returns the row produced by the
// projection transform, with no window attached.
@SuppressWarnings("OptionalGetWithoutIsPresent") @Test public void shouldReturnSelectTransformedFromNonWindowed() {
// Given:
final MaterializedTable table = materialization.nonWindowed(); givenNoopFilter(); when(project.apply(any(), any(), any())).thenReturn(Optional.of(transformed));
// When:
final Iterator<Row> result = table.get(aKey, partition);
// Then:
assertThat(result, is(not(Optional.empty()))); assertThat(result.hasNext(), is(true)); final Row row = result.next(); assertThat(row.key(), is(aKey)); assertThat(row.window(), is(Optional.empty())); assertThat(row.value(), is(transformed)); }
// Parses every object contained in this object stream. Reads the offset table
// first, then walks the stream sequentially, honouring per-key stream indices
// only when several entries share one object number. The source is always
// closed and the document reference released when done.
public Map<COSObjectKey, COSBase> parseAllObjects() throws IOException { Map<COSObjectKey, COSBase> allObjects = new HashMap<>(); try { Map<Integer, Long> objectNumbers = privateReadObjectOffsets();
// count the number of object numbers eliminating double entries
long numberOfObjNumbers = objectNumbers.values().stream().distinct().count();
// the usage of the index should be restricted to cases where more than one
// object use the same object number.
// there are malformed pdfs in the wild which would lead to false results if
// pdfbox always relies on the index if available. In most cases the object number
// is sufficient to choose the correct object
boolean indexNeeded = objectNumbers.size() > numberOfObjNumbers; long currentPosition = source.getPosition(); if (firstObject > 0 && currentPosition < firstObject) { source.skip(firstObject - (int) currentPosition); } int index = 0; for (Entry<Integer, Long> entry : objectNumbers.entrySet()) { COSObjectKey objectKey = getObjectKey(entry.getValue(), 0);
// skip object if the index doesn't match
if (indexNeeded && objectKey.getStreamIndex() > -1 && objectKey.getStreamIndex() != index) { index++; continue; } int finalPosition = firstObject + entry.getKey(); currentPosition = source.getPosition(); if (finalPosition > 0 && currentPosition < finalPosition) {
// jump to the offset of the object to be parsed
source.skip(finalPosition - (int) currentPosition); } COSBase streamObject = parseDirObject(); if (streamObject != null) { streamObject.setDirect(false); } allObjects.put(objectKey, streamObject); index++; } } finally { source.close(); document = null; } return allObjects; }
// Exercises index-based disambiguation: two stream entries share object number
// 4, and the xref index selects which of them wins when parsing all objects.
@Test void testParseAllObjectsIndexed() throws IOException { COSStream stream = new COSStream(); stream.setItem(COSName.N, COSInteger.THREE); stream.setItem(COSName.FIRST, COSInteger.get(13)); OutputStream outputStream = stream.createOutputStream();
// use object number 4 for two objects
outputStream.write("6 0 4 5 4 11 true false true".getBytes()); outputStream.close(); COSDocument cosDoc = new COSDocument(); Map<COSObjectKey, Long> xrefTable = cosDoc.getXrefTable();
// select the second object from the stream for object number 4 by using 2 as value for the index
xrefTable.put(new COSObjectKey(6, 0, 0), -1L); xrefTable.put(new COSObjectKey(4, 0, 2), -1L); PDFObjectStreamParser objectStreamParser = new PDFObjectStreamParser(stream, cosDoc); Map<COSObjectKey, COSBase> objectNumbers = objectStreamParser.parseAllObjects(); assertEquals(2, objectNumbers.size()); assertEquals(COSBoolean.TRUE, objectNumbers.get(new COSObjectKey(6, 0))); assertEquals(COSBoolean.TRUE, objectNumbers.get(new COSObjectKey(4, 0)));
// select the first object from the stream for object number 4 by using 1 as value for the index
// remove the old entry first to be sure it is replaced
xrefTable.remove(new COSObjectKey(4, 0)); xrefTable.put(new COSObjectKey(4, 0, 1), -1L); objectStreamParser = new PDFObjectStreamParser(stream, cosDoc); objectNumbers = objectStreamParser.parseAllObjects(); assertEquals(2, objectNumbers.size()); assertEquals(COSBoolean.TRUE, objectNumbers.get(new COSObjectKey(6, 0))); assertEquals(COSBoolean.FALSE, objectNumbers.get(new COSObjectKey(4, 0))); }
/**
 * Runs the paged operation: repeatedly fetches pages and applies
 * {@code doOperation} to each item until the page or time budget is exhausted,
 * or an empty page signals the end of data. Per-item exceptions are either
 * counted and swallowed or rethrown, depending on {@code swallowExceptions}.
 */
public void execute(){
    logger.debug("[" + getOperationName() + "] Starting execution of paged operation. maximum time: " + maxTime + ", maximum pages: " + maxPages);
    final long startTime = System.currentTimeMillis();
    long elapsed = 0;
    int pagesProcessed = 0;
    int swallowedCount = 0;
    int completedCount = 0;
    final Set<String> swallowedClasses = new HashSet<String>();
    while (pagesProcessed < maxPages && elapsed < maxTime) {
        final Collection<T> page = fetchPage();
        if (page == null || page.isEmpty()) {
            // No more data; stop early.
            break;
        }
        for (final T item : page) {
            try {
                doOperation(item);
                completedCount++;
            }
            catch (Exception e) {
                if (swallowExceptions) {
                    swallowedCount++;
                    swallowedClasses.add(e.getClass().getName());
                    logger.debug("Swallowing exception " + e.getMessage(), e);
                }
                else {
                    logger.debug("Rethrowing exception " + e.getMessage());
                    throw e;
                }
            }
        }
        pagesProcessed++;
        elapsed = System.currentTimeMillis() - startTime;
    }
    finalReport(completedCount, swallowedCount, swallowedClasses);
}
// A negative time budget must stop execution before any page is processed.
@Test(timeout = 1000L) public void execute_negtime(){ Long timeMillis = -100L; CountingPageOperation op = new CountingPageOperation(Integer.MAX_VALUE,timeMillis); op.execute(); assertEquals(0L, op.getCounter()); }
/**
 * Infers a Pinot {@link Schema} from the first JSON object found in the given file.
 *
 * @param jsonFile                    source file containing JSON data
 * @param fieldTypeMap                optional explicit field-type overrides
 * @param timeUnit                    optional time unit for date-time fields
 * @param fieldsToUnnest              optional collection fields to flatten; treated as empty when null
 * @param delimiter                   delimiter used when flattening nested field names
 * @param collectionNotUnnestedToJson strategy for collections that are not unnested
 * @return the inferred schema
 * @throws IOException if the file cannot be read
 */
public static Schema getPinotSchemaFromJsonFile(File jsonFile,
    @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit,
    @Nullable List<String> fieldsToUnnest, String delimiter,
    ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson)
    throws IOException {
  JsonNode jsonNode = fileToFirstJsonNode(jsonFile);
  // Normalize the unnest list so downstream code never sees null.
  List<String> unnestFields = fieldsToUnnest == null ? new ArrayList<>() : fieldsToUnnest;
  Preconditions.checkNotNull(jsonNode, "the JSON data shall be an object but it is null");
  Preconditions.checkState(jsonNode.isObject(), "the JSON data shall be an object");
  return getPinotSchemaFromJsonNode(jsonNode, fieldTypeMap, timeUnit, unnestFields, delimiter,
      collectionNotUnnestedToJson);
}
// Exercises schema inference from a JSON sample file under four configurations:
// no unnesting, unnesting the "entries" collection, a custom delimiter, and the
// collection-to-JSON strategy that turns multi-value d2 into a string.
@Test public void testInferSchema() throws Exception { ClassLoader classLoader = JsonUtilsTest.class.getClassLoader(); File file = new File(Objects.requireNonNull(classLoader.getResource(JSON_FILE)).getFile()); Map<String, FieldSpec.FieldType> fieldSpecMap = ImmutableMap.of("d1", FieldSpec.FieldType.DIMENSION, "hoursSinceEpoch", FieldSpec.FieldType.DATE_TIME, "m1", FieldSpec.FieldType.METRIC); Schema inferredPinotSchema = JsonUtils.getPinotSchemaFromJsonFile(file, fieldSpecMap, TimeUnit.HOURS, new ArrayList<>(), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE); Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", FieldSpec.DataType.STRING) .addMetric("m1", FieldSpec.DataType.INT) .addSingleValueDimension("tuple.address.streetaddress", FieldSpec.DataType.STRING) .addSingleValueDimension("tuple.address.city", FieldSpec.DataType.STRING) .addSingleValueDimension("entries", FieldSpec.DataType.STRING) .addMultiValueDimension("d2", FieldSpec.DataType.INT) .addDateTime("hoursSinceEpoch", FieldSpec.DataType.INT, "1:HOURS:EPOCH", "1:HOURS").build(); Assert.assertEquals(inferredPinotSchema, expectedSchema);
// unnest collection entries
inferredPinotSchema = JsonUtils.getPinotSchemaFromJsonFile(file, fieldSpecMap, TimeUnit.HOURS, Collections.singletonList("entries"), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE); expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", FieldSpec.DataType.STRING) .addMetric("m1", FieldSpec.DataType.INT) .addSingleValueDimension("tuple.address.streetaddress", FieldSpec.DataType.STRING) .addSingleValueDimension("tuple.address.city", FieldSpec.DataType.STRING) .addSingleValueDimension("entries.id", FieldSpec.DataType.INT) .addSingleValueDimension("entries.description", FieldSpec.DataType.STRING) .addMultiValueDimension("d2", FieldSpec.DataType.INT) .addDateTime("hoursSinceEpoch", FieldSpec.DataType.INT, "1:HOURS:EPOCH", "1:HOURS").build(); Assert.assertEquals(inferredPinotSchema, expectedSchema);
// change delimiter
inferredPinotSchema = JsonUtils.getPinotSchemaFromJsonFile(file, fieldSpecMap, TimeUnit.HOURS, Collections.singletonList(""), "_", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE); expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", FieldSpec.DataType.STRING) .addMetric("m1", FieldSpec.DataType.INT) .addSingleValueDimension("tuple_address_streetaddress", FieldSpec.DataType.STRING) .addSingleValueDimension("tuple_address_city", FieldSpec.DataType.STRING) .addSingleValueDimension("entries", FieldSpec.DataType.STRING) .addMultiValueDimension("d2", FieldSpec.DataType.INT) .addDateTime("hoursSinceEpoch", FieldSpec.DataType.INT, "1:HOURS:EPOCH", "1:HOURS").build(); Assert.assertEquals(inferredPinotSchema, expectedSchema);
// change the handling of collection-to-json option, d2 will become string
inferredPinotSchema = JsonUtils.getPinotSchemaFromJsonFile(file, fieldSpecMap, TimeUnit.HOURS, Collections.singletonList("entries"), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.ALL); expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", FieldSpec.DataType.STRING) .addMetric("m1", FieldSpec.DataType.INT) .addSingleValueDimension("tuple.address.streetaddress", FieldSpec.DataType.STRING) .addSingleValueDimension("tuple.address.city", FieldSpec.DataType.STRING) .addSingleValueDimension("entries.id", FieldSpec.DataType.INT) .addSingleValueDimension("entries.description", FieldSpec.DataType.STRING) .addSingleValueDimension("d2", FieldSpec.DataType.STRING) .addDateTime("hoursSinceEpoch", FieldSpec.DataType.INT, "1:HOURS:EPOCH", "1:HOURS").build(); Assert.assertEquals(inferredPinotSchema, expectedSchema); }
// Sets the symmetric-encryption password and returns this config for chaining.
public SymmetricEncryptionConfig setPassword(String password) { this.password = password; return this; }
// Verifies the password setter/getter round-trip.
@Test public void testSetPassword() { config.setPassword("myPassword"); assertEquals("myPassword", config.getPassword()); }
// Builds a DeleteRecordsHandler for the requested partitions and drives it via
// the admin api driver, exposing per-partition low-watermark futures.
// NOTE(review): the handler receives the resolved timeout (options or default),
// while invokeDriver is handed options.timeoutMs directly — presumably the
// driver resolves a null timeout itself; confirm this asymmetry is intended.
@Override public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) { SimpleAdminApiFuture<TopicPartition, DeletedRecords> future = DeleteRecordsHandler.newFuture(recordsToDelete.keySet()); int timeoutMs = defaultApiTimeoutMs; if (options.timeoutMs() != null) { timeoutMs = options.timeoutMs(); } DeleteRecordsHandler handler = new DeleteRecordsHandler(recordsToDelete, logContext, timeoutMs); invokeDriver(handler, future, options.timeoutMs); return new DeleteRecordsResult(future.all()); }
// End-to-end mock test for deleteRecords: after metadata retries, a response
// covering three of four partitions is returned, and the per-partition futures
// must reflect success, offset-out-of-range, authorization failure, and a
// missing-result ApiException respectively.
@Test public void testDeleteRecords() throws Exception { HashMap<Integer, Node> nodes = new HashMap<>(); nodes.put(0, new Node(0, "localhost", 8121)); List<PartitionInfo> partitionInfos = new ArrayList<>(); partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)})); partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)})); partitionInfos.add(new PartitionInfo("my_topic", 2, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)})); partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] {nodes.get(0)}, new Node[] {nodes.get(0)})); Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.emptySet(), Collections.emptySet(), nodes.get(0)); TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.LEADER_NOT_AVAILABLE)); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION)); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); DeleteRecordsResponseData m = new DeleteRecordsResponseData(); m.topics().add(new DeleteRecordsResponseData.DeleteRecordsTopicResult().setName(myTopicPartition0.topic()) .setPartitions(new DeleteRecordsResponseData.DeleteRecordsPartitionResultCollection(asList( new DeleteRecordsResponseData.DeleteRecordsPartitionResult() .setPartitionIndex(myTopicPartition0.partition()) .setLowWatermark(3) .setErrorCode(Errors.NONE.code()), new DeleteRecordsResponseData.DeleteRecordsPartitionResult() .setPartitionIndex(myTopicPartition1.partition()) .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()), new DeleteRecordsResponseData.DeleteRecordsPartitionResult() .setPartitionIndex(myTopicPartition2.partition()) .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK) .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()) ).iterator()))); env.kafkaClient().prepareResponse(new DeleteRecordsResponse(m)); Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>(); recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L)); recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L)); recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L)); recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L)); DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
// success on records deletion for partition 0
Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks(); KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0); long myTopicPartition0lowWatermark = myTopicPartition0Result.get().lowWatermark(); assertEquals(3, myTopicPartition0lowWatermark);
// "offset out of range" failure on records deletion for partition 1
KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1); assertInstanceOf(OffsetOutOfRangeException.class, assertThrows(ExecutionException.class, myTopicPartition1Result::get).getCause());
// not authorized to delete records for partition 2
KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2); assertInstanceOf(TopicAuthorizationException.class, assertThrows(ExecutionException.class, myTopicPartition2Result::get).getCause());
// the response does not contain a result for partition 3
KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3); assertInstanceOf(ApiException.class, assertThrows(ExecutionException.class, myTopicPartition3Result::get).getCause()); } }
/**
 * Validates that a configured Kerberos principal name, when present, follows
 * the expected "primary/instance@REALM" format (i.e. contains a '/').
 *
 * @param kerberosPrincipal the principal configuration to validate
 * @throws IllegalArgumentException if a principal name is set but lacks the host component
 * @throws IOException declared for API compatibility; not thrown by this check
 */
public static void validateKerberosPrincipal(
    KerberosPrincipal kerberosPrincipal) throws IOException {
  String principalName = kerberosPrincipal.getPrincipalName();
  // Only a non-empty principal is validated; an unset principal is acceptable.
  if (!StringUtils.isEmpty(principalName) && !principalName.contains("/")) {
    throw new IllegalArgumentException(String.format(
        RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_NAME_FORMAT,
        principalName));
  }
}
// Verifies that well-formed principal names pass validation both with a
// file:// keytab URI and with a plain path (no URI scheme).
@Test public void testKerberosPrincipal() throws IOException { SliderFileSystem sfs = ServiceTestUtils.initMockFs(); Service app = createValidApplication("comp-a"); KerberosPrincipal kp = new KerberosPrincipal(); kp.setKeytab("file:///tmp/a.keytab"); kp.setPrincipalName("user/_HOST@domain.com"); app.setKerberosPrincipal(kp);
// This should succeed
try { ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal()); } catch (IllegalArgumentException e) { Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); }
// Keytab with no URI scheme should succeed too
kp.setKeytab("/some/path"); try { ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal()); } catch (IllegalArgumentException e) { Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); } }
/**
 * Resolves the leader user ids of the departments listed in {@code param}.
 *
 * @param execution current process execution (unused by this strategy)
 * @param param comma-separated department ids
 * @return the set of department leader user ids
 */
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
    // Parse the comma-separated department ids, then look up each department's leader.
    final Set<Long> departmentIds = StrUtils.splitToLongSet(param);
    final List<DeptRespDTO> departments = deptApi.getDeptList(departmentIds).getCheckedData();
    return convertSet(departments, DeptRespDTO::getLeaderUserId);
}
// Verifies that leader user ids are collected for the departments parsed from
// the comma-separated parameter.
@Test public void testCalculateUsers() {
// Prepare the input parameter
String param = "1,2";
// Mock the department API
DeptRespDTO dept1 = randomPojo(DeptRespDTO.class, o -> o.setLeaderUserId(11L)); DeptRespDTO dept2 = randomPojo(DeptRespDTO.class, o -> o.setLeaderUserId(22L)); when(deptApi.getDeptList(eq(asSet(1L, 2L)))).thenReturn(success(asList(dept1, dept2)));
// Invoke
Set<Long> results = strategy.calculateUsers(null, param);
// Assert the expected leader ids
assertEquals(asSet(11L, 22L), results); }
// Returns the class name this permission applies to.
public String getClassName() { return classname; }
// Verifies that the class name passed to the constructor is returned unchanged.
@Test public void testGetClassName() { Permission permission = new Permission("classname", "name"); assertEquals("classname", permission.getClassName()); }
/**
 * Estimates the encoded size in bytes of a mutation. Deletes are costed by
 * their key set; writes sum the estimated size of each column value.
 *
 * @param m the mutation to measure
 * @return the estimated size in bytes
 * @throws IllegalArgumentException if the mutation contains a STRUCT value
 */
static long sizeOf(Mutation m) {
  // Deletes carry no column values; their footprint is the key set alone.
  if (m.getOperation() == Mutation.Op.DELETE) {
    return sizeOf(m.getKeySet());
  }
  long total = 0;
  for (Value value : m.getValues()) {
    switch (value.getType().getCode()) {
      case ARRAY:
        total += estimateArrayValue(value);
        break;
      case STRUCT:
        throw new IllegalArgumentException("Structs are not supported in mutation.");
      default:
        total += estimatePrimitiveValue(value);
    }
  }
  return total;
}
// A delete over an open-open range between two INT64 keys is estimated at 16 bytes.
@Test public void deleteKeyRanges() throws Exception { Mutation range = Mutation.delete("test", KeySet.range(KeyRange.openOpen(Key.of(1L), Key.of(4L)))); assertThat(MutationSizeEstimator.sizeOf(range), is(16L)); }
// Feeds incoming bytes to the current frame parser, lazily creating the
// header parser on first use.
public void parse(DataByteArrayInputStream input, int readSize) throws Exception { if (currentParser == null) { currentParser = initializeHeaderParser(); }
// Parser stack will run until current incoming data has all been consumed.
currentParser.parse(input, readSize); }
// Round-trip: a CONNECT frame with an empty client id must pass through the
// codec as exactly one frame and decode with cleanSession preserved.
@Test public void testEmptyConnectBytes() throws Exception { CONNECT connect = new CONNECT(); connect.cleanSession(true); connect.clientId(new UTF8Buffer("")); DataByteArrayOutputStream output = new DataByteArrayOutputStream(); wireFormat.marshal(connect.encode(), output); Buffer marshalled = output.toBuffer(); DataByteArrayInputStream input = new DataByteArrayInputStream(marshalled); codec.parse(input, marshalled.length()); assertTrue(!frames.isEmpty()); assertEquals(1, frames.size()); connect = new CONNECT().decode(frames.get(0)); LOG.info("Unmarshalled: {}", connect); assertTrue(connect.cleanSession()); }
// Starts a multipart upload for the file and returns a stream that buffers
// writes into fixed-size chunks. On close, the upload is completed and the
// resulting node exposed via getStatus(); on I/O failure the upload is
// cancelled server-side before the exception is rethrown. Fails fast with
// InteroperabilityException when the server response lacks the upload URL or token.
@Override public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final CreateFileUploadResponse uploadResponse = upload.start(file, status); final String uploadUrl = uploadResponse.getUploadUrl(); if(StringUtils.isBlank(uploadUrl)) { throw new InteroperabilityException("Missing upload URL in server response"); } final String uploadToken = uploadResponse.getToken(); if(StringUtils.isBlank(uploadToken)) { throw new InteroperabilityException("Missing upload token in server response"); } final MultipartUploadTokenOutputStream proxy = new MultipartUploadTokenOutputStream(session, nodeid, file, status, uploadUrl); return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy, new HostPreferences(session.getHost()).getInteger("sds.upload.multipart.chunksize")), new SDSAttributesAdapter(session), status) { private final AtomicBoolean close = new AtomicBoolean(); private final AtomicReference<Node> node = new AtomicReference<>(); @Override public Node getStatus() { return node.get(); } @Override public void close() throws IOException { try { if(close.get()) { log.warn(String.format("Skip double close of stream %s", this)); return; } super.close(); node.set(upload.complete(file, uploadToken, status)); } catch(BackgroundException e) { throw new IOException(e); } finally { close.set(true); } } @Override protected void handleIOException(final IOException e) throws IOException {
// Cancel upload on error reply
try { upload.cancel(file, uploadToken); } catch(BackgroundException f) { log.warn(String.format("Failure %s cancelling upload for file %s with upload token %s after failure %s", f, file, uploadToken, e)); } throw e; } }; }
// Integration test: uploads a file via the multipart writer, verifies its
// attributes and content round-trip, overwrites it twice (exists=true and
// exists=false), and checks that reading the superseded version fails.
@Test public void testReadWrite() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final byte[] content = RandomUtils.nextBytes(32769); final Path test = new Path(room, new NFDNormalizer().normalize(String.format("ä%s", new AlphanumericRandomStringService().random())).toString(), EnumSet.of(Path.Type.file)); { final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setChecksum(new MD5ChecksumCompute().compute(new ByteArrayInputStream(content), new TransferStatus())); status.setMime("text/plain"); status.setModified(1632127025217L); final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); assertEquals(content.length, out.getStatus().getSize(), 0L); } assertNotNull(test.attributes().getVersionId()); assertTrue(new DefaultFindFeature(session).find(test)); assertTrue(new SDSFindFeature(session, nodeid).find(test)); final PathAttributes attr = new SDSAttributesFinderFeature(session, nodeid).find(test); assertEquals(test.attributes().getVersionId(), attr.getVersionId()); assertEquals(1632127025217L, attr.getModificationDate()); assertEquals(1632127025217L, new DefaultAttributesFinderFeature(session).find(test).getModificationDate()); final byte[] compare = new byte[content.length]; final InputStream stream = new SDSReadFeature(session, nodeid).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); String previousVersion = test.attributes().getVersionId();
// Overwrite
{ final byte[] change = RandomUtils.nextBytes(256); final TransferStatus status = new TransferStatus(); status.setLength(change.length); final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid); final StatusOutputStream<Node> out = writer.write(test, status.exists(true), new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(change), out); assertNotEquals(test.attributes().getVersionId(), out.getStatus()); }
// Overwrite with exists=false
{ final byte[] change = RandomUtils.nextBytes(124); final TransferStatus status = new TransferStatus(); status.setLength(change.length); final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid); final StatusOutputStream<Node> out = writer.write(test, status.exists(false), new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(change), out); assertNotEquals(test.attributes().getVersionId(), out.getStatus()); } assertNotEquals(attr.getRevision(), new SDSAttributesFinderFeature(session, nodeid).find(test));
// Read with previous version must fail
try { test.attributes().withVersionId(previousVersion); new SDSReadFeature(session, nodeid).read(test, new TransferStatus(), new DisabledConnectionCallback()); fail(); } catch(NotfoundException e) {
// Expected
} new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// Returns a live, index-based view of the given column; no data is copied.
public List<String> column(final int column) { return new ColumnView(column); }
// A negative row index into a column view must raise IndexOutOfBoundsException.
@Test void column_should_throw_for_negative_row_value() { assertThrows(IndexOutOfBoundsException.class, () -> createSimpleTable().column(0).get(-1)); }
/**
 * Builds the resource group manager spec from database records.
 * Resource group specs are assembled bottom-up: a group is only built once all of its
 * subgroups have been built, which is arranged via a work queue seeded with the roots.
 * Selector specs are then loaded for the current environment and resolved against the
 * id-template map.
 */
@Override
public synchronized ManagerSpec getManagerSpec() {
    Set<Long> rootGroupIds = new HashSet<>();
    Map<Long, ResourceGroupSpec> resourceGroupSpecMap = new HashMap<>();
    Map<Long, ResourceGroupIdTemplate> resourceGroupIdTemplateMap = new HashMap<>();
    Map<Long, ResourceGroupSpecBuilder> recordMap = new HashMap<>();
    Map<Long, Set<Long>> subGroupIdsToBuild = new HashMap<>();
    populateFromDbHelper(recordMap, rootGroupIds, resourceGroupIdTemplateMap, subGroupIdsToBuild);
    // Build up resource group specs from root to leaf
    for (LinkedList<Long> queue = new LinkedList<>(rootGroupIds); !queue.isEmpty(); ) {
        Long id = queue.pollFirst();
        // Derive this group's id template from its parent's (roots are expected to be
        // pre-populated by populateFromDbHelper, so the lambda only runs for non-roots).
        resourceGroupIdTemplateMap.computeIfAbsent(id, k -> {
            ResourceGroupSpecBuilder builder = recordMap.get(k);
            return ResourceGroupIdTemplate.forSubGroupNamed(
                    resourceGroupIdTemplateMap.get(builder.getParentId().get()),
                    builder.getNameTemplate().toString());
        });
        Set<Long> childrenToBuild = subGroupIdsToBuild.getOrDefault(id, ImmutableSet.of());
        // Add to resource group specs if no more child resource groups are left to build
        if (childrenToBuild.isEmpty()) {
            ResourceGroupSpecBuilder builder = recordMap.get(id);
            ResourceGroupSpec resourceGroupSpec = builder.build();
            resourceGroupSpecMap.put(id, resourceGroupSpec);
            // Add this resource group spec to parent subgroups and remove id from subgroup ids to build
            builder.getParentId().ifPresent(parentId -> {
                recordMap.get(parentId).addSubGroup(resourceGroupSpec);
                subGroupIdsToBuild.get(parentId).remove(id);
            });
        }
        else {
            // Add this group back to queue since it still has subgroups to build
            queue.addFirst(id);
            // Add this group's subgroups to the queue so that when this id is dequeued again childrenToBuild will be empty
            queue.addAll(0, childrenToBuild);
        }
    }
    // Specs are built from db records, validate and return manager spec
    List<ResourceGroupSpec> rootGroups = rootGroupIds.stream().map(resourceGroupSpecMap::get).collect(toList());
    List<SelectorSpec> selectors = resourceGroupsDao.getSelectors(environment)
            .stream()
            .map(selectorRecord -> new SelectorSpec(
                    selectorRecord.getUserRegex(),
                    selectorRecord.getSourceRegex(),
                    selectorRecord.getQueryType(),
                    selectorRecord.getClientTags(),
                    selectorRecord.getSelectorResourceEstimate(),
                    selectorRecord.getClientInfoRegex(),
                    selectorRecord.getSchema(),
                    selectorRecord.getPrincipalRegex(),
                    resourceGroupIdTemplateMap.get(selectorRecord.getResourceGroupId()))
            ).collect(toList());
    ManagerSpec managerSpec = new ManagerSpec(rootGroups, selectors, getCpuQuotaPeriodFromDb());
    return managerSpec;
}
// Resource groups and selectors are environment-scoped: each provider instance
// must only see the rows inserted for its own environment.
@Test
public void testEnvironments() {
    H2DaoProvider daoProvider = setup("test_configuration");
    H2ResourceGroupsDao dao = daoProvider.get();
    dao.createResourceGroupsGlobalPropertiesTable();
    dao.createResourceGroupsTable();
    dao.createSelectorsTable();
    String prodEnvironment = "prod";
    String devEnvironment = "dev";
    dao.insertResourceGroupsGlobalProperties("cpu_quota_period", "1h");
    // two resource groups are the same except the group for the prod environment has a larger softMemoryLimit
    dao.insertResourceGroup(1, "prod_global", "10MB", 1000, 100, 100, "weighted", null, true, "1h", "1d", "1h", "1MB", "1h", 0, null, prodEnvironment);
    dao.insertResourceGroup(2, "dev_global", "1MB", 1000, 100, 100, "weighted", null, true, "1h", "1d", "1h", "1MB", "1h", 0, null, devEnvironment);
    dao.insertSelector(1, 1, ".*prod_user.*", null, null, null, null, null);
    dao.insertSelector(2, 2, ".*dev_user.*", null, null, null, null, null);
    // check the prod configuration
    DbManagerSpecProvider dbManagerSpecProvider = new DbManagerSpecProvider(daoProvider.get(), prodEnvironment, new ReloadingResourceGroupConfig());
    ManagerSpec managerSpec = dbManagerSpecProvider.getManagerSpec();
    assertEquals(managerSpec.getRootGroups().size(), 1);
    assertEquals(managerSpec.getSelectors().size(), 1);
    SelectorSpec prodSelector = managerSpec.getSelectors().get(0);
    assertEquals(prodSelector.getGroup().toString(), "prod_global");
    // check the dev configuration
    dbManagerSpecProvider = new DbManagerSpecProvider(daoProvider.get(), devEnvironment, new ReloadingResourceGroupConfig());
    managerSpec = dbManagerSpecProvider.getManagerSpec();
    assertEquals(managerSpec.getRootGroups().size(), 1);
    assertEquals(managerSpec.getSelectors().size(), 1);
    prodSelector = managerSpec.getSelectors().get(0);
    assertEquals(prodSelector.getGroup().toString(), "dev_global");
}
/**
 * Reads the next complete event, reassembling it from one or more records that
 * may span block boundaries.
 *
 * @return the serialized event bytes, or {@code null} when the channel is closed,
 *         no event start could be found, or the read was interrupted
 * @throws IOException on underlying channel errors
 */
public byte[] readEvent() throws IOException {
    try {
        if (!channel.isOpen() || !consumeToStartOfEvent()) {
            return null;
        }
        RecordHeader header = RecordHeader.get(currentBlock);
        streamPosition += RECORD_HEADER_SIZE;
        int cumReadSize = 0;
        // Full event size when the event is split across records; otherwise this record's size
        int bufferSize = header.getTotalEventSize().orElseGet(header::getSize);
        ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
        getRecord(buffer, header);
        cumReadSize += header.getSize();
        // Keep appending continuation records (rolling blocks as needed) until the event is complete
        while (cumReadSize < bufferSize) {
            maybeRollToNextBlock();
            RecordHeader nextHeader = RecordHeader.get(currentBlock);
            streamPosition += RECORD_HEADER_SIZE;
            getRecord(buffer, nextHeader);
            cumReadSize += nextHeader.getSize();
        }
        return buffer.array();
    } catch (ClosedByInterruptException e) {
        // Reading thread was interrupted mid-read; report "no event" rather than failing
        return null;
    }
}
// Interleaves writes and reads with events sized to cross block boundaries:
// the reader must return each event intact even while the writer keeps appending.
@Test
public void testReadWhileWriteAcrossBoundary() throws Exception {
    char[] tooBig = fillArray(BLOCK_SIZE / 4);
    StringElement input = new StringElement(new String(tooBig));
    byte[] inputSerialized = input.serialize();
    try (RecordIOWriter writer = new RecordIOWriter(file);
         RecordIOReader reader = new RecordIOReader(file)) {
        for (int j = 0; j < 2; j++) {
            writer.writeEvent(inputSerialized);
        }
        assertThat(reader.readEvent(), equalTo(inputSerialized));
        for (int j = 0; j < 2; j++) {
            writer.writeEvent(inputSerialized);
        }
        // One event was already consumed above; the remaining three must read back intact
        for (int j = 0; j < 3; j++) {
            assertThat(reader.readEvent(), equalTo(inputSerialized));
        }
    }
}
/**
 * Deletes the capacity row for the given group.
 *
 * @param group group id whose capacity record is removed
 * @return true when exactly one row was deleted
 * @throws CannotGetJdbcConnectionException rethrown after logging when no connection is available
 */
public boolean deleteGroupCapacity(final String group) {
    try {
        final GroupCapacityMapper capacityMapper =
                mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.GROUP_CAPACITY);
        // Build the statement lazily so SQL generation happens only when executed
        final PreparedStatementCreator statementCreator = connection -> {
            final PreparedStatement statement =
                    connection.prepareStatement(capacityMapper.delete(Collections.singletonList("group_id")));
            statement.setString(1, group);
            return statement;
        };
        final int affectedRows = jdbcTemplate.update(statementCreator);
        return affectedRows == 1;
    } catch (CannotGetJdbcConnectionException e) {
        FATAL_LOG.error("[db-error]", e);
        throw e;
    }
}
// deleteGroupCapacity returns true when one row is affected, and rethrows
// CannotGetJdbcConnectionException untouched.
@Test
void testDeleteGroupCapacity() {
    when(jdbcTemplate.update(any(PreparedStatementCreator.class))).thenReturn(1);
    assertTrue(service.deleteGroupCapacity("test"));
    // mock: obtaining a JDBC connection fails
    when(jdbcTemplate.update(any(PreparedStatementCreator.class))).thenThrow(new CannotGetJdbcConnectionException("conn fail"));
    try {
        service.deleteGroupCapacity("test");
        assertTrue(false); // must not reach here — the exception should propagate
    } catch (Exception e) {
        assertEquals("conn fail", e.getMessage());
    }
}
/**
 * Creates the next span based on extracted propagation state.
 * Precedence: a full extracted context becomes a child; an extracted trace-id-only
 * context starts a span in that trace; otherwise the span is parented on the current
 * in-process context (inheriting its sampling decision) or starts a fresh trace from
 * the extracted sampling flags.
 *
 * @throws NullPointerException if {@code extracted} is null
 */
public Span nextSpan(TraceContextOrSamplingFlags extracted) {
    if (extracted == null) throw new NullPointerException("extracted == null");
    TraceContext context = extracted.context();
    if (context != null) return newChild(context);
    TraceIdContext traceIdContext = extracted.traceIdContext();
    if (traceIdContext != null) {
        // Trace ids were propagated without a span id: mint a new root-like span in that trace
        return _toSpan(null, decorateContext(
            InternalPropagation.instance.flags(extracted.traceIdContext()),
            traceIdContext.traceIdHigh(),
            traceIdContext.traceId(),
            0L,
            0L,
            0L,
            extracted.extra()
        ));
    }
    SamplingFlags samplingFlags = extracted.samplingFlags();
    List<Object> extra = extracted.extra();
    TraceContext parent = currentTraceContext.get();
    int flags;
    long traceIdHigh = 0L, traceId = 0L, localRootId = 0L, spanId = 0L;
    if (parent != null) {
        // At this point, we didn't extract trace IDs, but do have a trace in progress. Since typical
        // trace sampling is up front, we retain the decision from the parent.
        flags = InternalPropagation.instance.flags(parent);
        traceIdHigh = parent.traceIdHigh();
        traceId = parent.traceId();
        localRootId = parent.localRootId();
        spanId = parent.spanId();
        extra = concat(extra, parent.extra());
    } else {
        flags = InternalPropagation.instance.flags(samplingFlags);
    }
    return _toSpan(parent, decorateContext(flags, traceIdHigh, traceId, localRootId, spanId, 0L, extra));
}
// Two different sampled parent contexts fed through nextSpan must produce distinct
// local root ids (verified by the localRootId helper).
@Test
void localRootId_nextSpan_sampled() {
    TraceContext context1 = TraceContext.newBuilder().traceId(1).spanId(2).sampled(true).build();
    TraceContext context2 = TraceContext.newBuilder().traceId(1).spanId(3).sampled(true).build();
    localRootId(context1, context2, ctx -> tracer.nextSpan(ctx));
}
/**
 * Sends a single in-app notification to a member-side user.
 * Convenience overload of {@code sendSingleNotify} that fixes the user type to MEMBER.
 *
 * @return id of the created notify message
 */
@Override
public Long sendSingleNotifyToMember(Long userId, String templateCode, Map<String, Object> templateParams) {
    return sendSingleNotify(userId, UserTypeEnum.MEMBER.getValue(), templateCode, templateParams);
}
// sendSingleNotifyToMember must resolve the template, format the content, and
// create the message with the MEMBER user type, returning the new message id.
@Test
public void testSendSingleNotifyToMember() {
    // Prepare arguments
    Long userId = randomLongId();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock NotifyTemplateService behavior
    NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(notifyTemplateService.getNotifyTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(notifyTemplateService.formatNotifyTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock NotifyMessageService behavior
    Long messageId = randomLongId();
    when(notifyMessageService.createNotifyMessage(eq(userId), eq(UserTypeEnum.MEMBER.getValue()),
        eq(template), eq(content), eq(templateParams))).thenReturn(messageId);
    // Invoke
    Long resultMessageId = notifySendService.sendSingleNotifyToMember(userId, templateCode, templateParams);
    // Assert
    assertEquals(messageId, resultMessageId);
}
/** Returns the shared converter from Connect schemas to SQL types. */
public static ConnectToSqlTypeConverter connectToSqlConverter() {
    return CONNECT_TO_SQL_CONVERTER;
}
// Nested complex Connect schemas must convert recursively to the equivalent SQL type.
@Test
public void shouldConvertNestedComplexToSql() {
    assertThat(SchemaConverters.connectToSqlConverter().toSqlType(NESTED_LOGICAL_TYPE), is(NESTED_SQL_TYPE));
}
/**
 * Instantiates {@code clazz} using a constructor matching the given arguments.
 *
 * @param clazz  class to instantiate
 * @param params constructor arguments used both for matching and invocation
 * @return the new instance, or {@code null} when no matching constructor exists
 *         or instantiation fails
 */
public static <T> T newInstanceOrNull(Class<? extends T> clazz, Object... params) {
    Constructor<T> ctor = selectMatchingConstructor(clazz, params);
    try {
        // No matching constructor is reported the same way as a failed invocation: null
        return ctor != null ? ctor.newInstance(params) : null;
    } catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
        return null;
    }
}
// Passing null alongside a primitive-typed constructor argument must still resolve
// a matching constructor and create an instance.
@Test
public void newInstanceOrNull_primitiveArgInConstructorPassingNull() {
    ClassWithTwoConstructorsIncludingPrimitives instance = InstantiationUtils.newInstanceOrNull(
        ClassWithTwoConstructorsIncludingPrimitives.class, 42, null);
    assertNotNull(instance);
}
/**
 * Returns whether the given text is a decimal number: an optional sign/prefix
 * (as accepted by {@code findStartPosition}) followed by digits with at most
 * one {@code '.'} separator. Note that digit-only input (no dot) also returns true.
 */
public static boolean isFloatingNumber(String text) {
    final int startPos = findStartPosition(text);
    if (startPos < 0) {
        return false;
    }
    boolean seenDot = false;
    for (int i = startPos; i < text.length(); i++) {
        final char ch = text.charAt(i);
        if (Character.isDigit(ch)) {
            continue;
        }
        // Anything other than a first '.' disqualifies the text
        if (ch != '.' || seenDot) {
            return false;
        }
        seenDot = true;
    }
    return true;
}
// Signed and unsigned decimal strings must all be recognized as floating numbers.
@Test
@DisplayName("Tests that isFloatingNumber returns true for floats")
void isFloatingNumberFloats() {
    assertTrue(ObjectHelper.isFloatingNumber("12.34"));
    assertTrue(ObjectHelper.isFloatingNumber("-12.34"));
    assertTrue(ObjectHelper.isFloatingNumber("1.0"));
    assertTrue(ObjectHelper.isFloatingNumber("0.0"));
}
/** Exposes the analyzer's currently computed sleep duration for tests. */
@VisibleForTesting
int getSleepDuration() {
    return sleepDuration;
}
// With no metrics reported, the analyzer must never introduce a throttling sleep,
// even after a full analysis period has elapsed.
@Test
public void testNoMetricUpdatesThenNoWaiting() {
    AbfsClientThrottlingAnalyzer analyzer = new AbfsClientThrottlingAnalyzer(
        "test", abfsConfiguration);
    validate(0, analyzer.getSleepDuration());
    sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT);
    validate(0, analyzer.getSleepDuration());
}
/** @return true when no search filter ids are hidden, i.e. every filter is visible */
public boolean allSearchFiltersVisible() {
    return hiddenSearchFiltersIDs.isEmpty();
}
// An empty hidden-filter list means all search filters are visible.
@Test
void testAllSearchFiltersVisibleReturnsTrueOnEmptyHiddenFilters() {
    toTest = new SearchFilterVisibilityCheckStatus(Collections.emptyList());
    assertTrue(toTest.allSearchFiltersVisible());
}
/**
 * Sets the submitter uuid, rejecting values longer than the 255-character column limit.
 *
 * @param s submitter uuid, may be null
 * @return this, for chaining
 * @throws IllegalArgumentException if {@code s} is longer than 255 characters
 */
public CeQueueDto setSubmitterUuid(@Nullable String s) {
    if (s != null && s.length() > 255) {
        throw new IllegalArgumentException("Value of submitter uuid is too long: " + s);
    }
    this.submitterUuid = s;
    return this;
}
// FIX: the test name claimed "setSubmitterLogin ... 41 chars" while it actually
// exercises setSubmitterUuid with a 256-character value (one past the 255 limit).
@Test
void setSubmitterUuid_throws_IAE_if_value_is_256_chars() {
    String str_256_chars = STR_255_CHARS + "a";
    assertThatThrownBy(() -> underTest.setSubmitterUuid(str_256_chars))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Value of submitter uuid is too long: " + str_256_chars);
}
/**
 * Lists in-progress partition reassignments, optionally restricted to the given partitions.
 * Unrepresentable topic names and negative partition indexes fail the returned future
 * immediately with {@link InvalidTopicException}, without contacting the controller.
 */
@Override
public ListPartitionReassignmentsResult listPartitionReassignments(Optional<Set<TopicPartition>> partitions,
                                                                   ListPartitionReassignmentsOptions options) {
    final KafkaFutureImpl<Map<TopicPartition, PartitionReassignment>> partitionReassignmentsFuture = new KafkaFutureImpl<>();
    if (partitions.isPresent()) {
        for (TopicPartition tp : partitions.get()) {
            String topic = tp.topic();
            int partition = tp.partition();
            if (topicNameIsUnrepresentable(topic)) {
                partitionReassignmentsFuture.completeExceptionally(new InvalidTopicException("The given topic name '"
                    + topic + "' cannot be represented in a request."));
            } else if (partition < 0) {
                partitionReassignmentsFuture.completeExceptionally(new InvalidTopicException("The given partition index "
                    + partition + " is not valid."));
            }
            if (partitionReassignmentsFuture.isCompletedExceptionally())
                return new ListPartitionReassignmentsResult(partitionReassignmentsFuture);
        }
    }
    final long now = time.milliseconds();
    runnable.call(new Call("listPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()),
            new ControllerNodeProvider()) {
        @Override
        ListPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) {
            ListPartitionReassignmentsRequestData listData = new ListPartitionReassignmentsRequestData();
            listData.setTimeoutMs(timeoutMs);
            if (partitions.isPresent()) {
                // Group the requested partitions by topic name for the wire format
                Map<String, ListPartitionReassignmentsTopics> reassignmentTopicByTopicName = new HashMap<>();
                for (TopicPartition tp : partitions.get()) {
                    if (!reassignmentTopicByTopicName.containsKey(tp.topic()))
                        reassignmentTopicByTopicName.put(tp.topic(), new ListPartitionReassignmentsTopics().setName(tp.topic()));
                    reassignmentTopicByTopicName.get(tp.topic()).partitionIndexes().add(tp.partition());
                }
                listData.setTopics(new ArrayList<>(reassignmentTopicByTopicName.values()));
            }
            return new ListPartitionReassignmentsRequest.Builder(listData);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            ListPartitionReassignmentsResponse response = (ListPartitionReassignmentsResponse) abstractResponse;
            Errors error = Errors.forCode(response.data().errorCode());
            switch (error) {
                case NONE:
                    break;
                case NOT_CONTROLLER:
                    // Delegated retry against the freshly discovered controller
                    handleNotControllerError(error);
                    break;
                default:
                    partitionReassignmentsFuture.completeExceptionally(new ApiError(error, response.data().errorMessage()).exception());
                    // FIX: return early — the original fell through and built a result map
                    // whose complete() call was silently ignored by the already
                    // exceptionally-completed future.
                    return;
            }
            Map<TopicPartition, PartitionReassignment> reassignmentMap = new HashMap<>();
            for (OngoingTopicReassignment topicReassignment : response.data().topics()) {
                String topicName = topicReassignment.name();
                for (OngoingPartitionReassignment partitionReassignment : topicReassignment.partitions()) {
                    reassignmentMap.put(
                        new TopicPartition(topicName, partitionReassignment.partitionIndex()),
                        new PartitionReassignment(partitionReassignment.replicas(), partitionReassignment.addingReplicas(), partitionReassignment.removingReplicas())
                    );
                }
            }
            partitionReassignmentsFuture.complete(reassignmentMap);
        }

        @Override
        void handleFailure(Throwable throwable) {
            partitionReassignmentsFuture.completeExceptionally(throwable);
        }
    }, now);
    return new ListPartitionReassignmentsResult(partitionReassignmentsFuture);
}
@Test public void testListPartitionReassignments() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); TopicPartition tp1 = new TopicPartition("A", 0); OngoingPartitionReassignment tp1PartitionReassignment = new OngoingPartitionReassignment() .setPartitionIndex(0) .setRemovingReplicas(asList(1, 2, 3)) .setAddingReplicas(asList(4, 5, 6)) .setReplicas(asList(1, 2, 3, 4, 5, 6)); OngoingTopicReassignment tp1Reassignment = new OngoingTopicReassignment().setName("A") .setPartitions(Collections.singletonList(tp1PartitionReassignment)); TopicPartition tp2 = new TopicPartition("B", 0); OngoingPartitionReassignment tp2PartitionReassignment = new OngoingPartitionReassignment() .setPartitionIndex(0) .setRemovingReplicas(asList(1, 2, 3)) .setAddingReplicas(asList(4, 5, 6)) .setReplicas(asList(1, 2, 3, 4, 5, 6)); OngoingTopicReassignment tp2Reassignment = new OngoingTopicReassignment().setName("B") .setPartitions(Collections.singletonList(tp2PartitionReassignment)); // 1. 
NOT_CONTROLLER error handling ListPartitionReassignmentsResponseData notControllerData = new ListPartitionReassignmentsResponseData() .setErrorCode(Errors.NOT_CONTROLLER.code()) .setErrorMessage(Errors.NOT_CONTROLLER.message()); MetadataResponse controllerNodeResponse = RequestTestUtils.metadataResponse(env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 1, Collections.emptyList()); ListPartitionReassignmentsResponseData reassignmentsData = new ListPartitionReassignmentsResponseData() .setTopics(asList(tp1Reassignment, tp2Reassignment)); env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(notControllerData)); env.kafkaClient().prepareResponse(controllerNodeResponse); env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(reassignmentsData)); ListPartitionReassignmentsResult noControllerResult = env.adminClient().listPartitionReassignments(); noControllerResult.reassignments().get(); // no error // 2. UNKNOWN_TOPIC_OR_EXCEPTION_ERROR ListPartitionReassignmentsResponseData unknownTpData = new ListPartitionReassignmentsResponseData() .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message()); env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(unknownTpData)); ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(new HashSet<>(asList(tp1, tp2))); TestUtils.assertFutureError(unknownTpResult.reassignments(), UnknownTopicOrPartitionException.class); // 3. 
Success ListPartitionReassignmentsResponseData responseData = new ListPartitionReassignmentsResponseData() .setTopics(asList(tp1Reassignment, tp2Reassignment)); env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(responseData)); ListPartitionReassignmentsResult responseResult = env.adminClient().listPartitionReassignments(); Map<TopicPartition, PartitionReassignment> reassignments = responseResult.reassignments().get(); PartitionReassignment tp1Result = reassignments.get(tp1); assertEquals(tp1PartitionReassignment.addingReplicas(), tp1Result.addingReplicas()); assertEquals(tp1PartitionReassignment.removingReplicas(), tp1Result.removingReplicas()); assertEquals(tp1PartitionReassignment.replicas(), tp1Result.replicas()); assertEquals(tp1PartitionReassignment.replicas(), tp1Result.replicas()); PartitionReassignment tp2Result = reassignments.get(tp2); assertEquals(tp2PartitionReassignment.addingReplicas(), tp2Result.addingReplicas()); assertEquals(tp2PartitionReassignment.removingReplicas(), tp2Result.removingReplicas()); assertEquals(tp2PartitionReassignment.replicas(), tp2Result.replicas()); assertEquals(tp2PartitionReassignment.replicas(), tp2Result.replicas()); } }
/**
 * Validates the {@code host} parameter: must be non-empty and match the
 * absolute-URI host pattern.
 *
 * @param given candidate host value
 * @return the validated host value, unchanged
 * @throws IllegalArgumentException when empty or not an absolute URI
 */
static String isHostParam(final String given) {
    final String host = StringHelper.notEmpty(given, "host");
    if (!HOST_PATTERN.matcher(given).matches()) {
        throw new IllegalArgumentException(
            "host must be an absolute URI (e.g. http://api.example.com), given: `" + host + "`");
    }
    return host;
}
// A null host must be rejected by the emptiness guard before any pattern matching.
@Test
public void nullHostParamsAreNotAllowed() {
    assertThrows(IllegalArgumentException.class, () -> RestOpenApiHelper.isHostParam(null));
}
@Override @CacheEvict(cacheNames = RedisKeyConstants.DEPT_CHILDREN_ID_LIST, allEntries = true) // allEntries 清空所有缓存,因为操作一个部门,涉及到多个缓存 public void deleteDept(Long id) { // 校验是否存在 validateDeptExists(id); // 校验是否有子部门 if (deptMapper.selectCountByParentId(id) > 0) { throw exception(DEPT_EXITS_CHILDREN); } // 删除部门 deptMapper.deleteById(id); }
// Deleting a leaf department (no children) must remove its row.
@Test
public void testDeleteDept_success() {
    // mock data
    DeptDO dbDeptDO = randomPojo(DeptDO.class);
    deptMapper.insert(dbDeptDO); // insert an existing row first
    // prepare arguments
    Long id = dbDeptDO.getId();
    // invoke
    deptService.deleteDept(id);
    // verify the row no longer exists
    assertNull(deptMapper.selectById(id));
}
/** Checks that the subject is equal to any of the given elements (at least two required). */
public void isAnyOf(
    @Nullable Object first, @Nullable Object second, @Nullable Object @Nullable ... rest) {
  // Delegates to isIn over the accumulated candidate list
  isIn(accumulate(first, second, rest));
}
// A null subject matched against only non-null candidates must produce a failure.
@Test
public void isAnyOfNullFailure() {
    expectFailure.whenTesting().that((String) null).isAnyOf("a", "b", "c");
}
/**
 * Evicts stale cache entries when a loadbalancer rule changes.
 * Non-change rules are ignored. For a change, the entries keyed by the old and
 * new service names are removed; if either name is missing the whole cache is
 * cleared as a safe fallback.
 *
 * @return true when the cache was updated (i.e. the rule was a change rule)
 */
public static boolean updateCache(Map<String, ?> cache, LoadbalancerRule rule) {
    if (!(rule instanceof ChangedLoadbalancerRule)) {
        return false;
    }
    final ChangedLoadbalancerRule changed = (ChangedLoadbalancerRule) rule;
    final String oldName = changed.getOldRule().getServiceName();
    final String newName = changed.getNewRule().getServiceName();
    if (oldName == null || newName == null) {
        cache.clear();
    } else {
        cache.remove(oldName);
        cache.remove(newName);
    }
    return true;
}
// updateCache must: ignore plain rules, clear everything when a service name is
// missing, and otherwise evict only the old/new service entries.
@Test
public void testCache() {
    // judgment type test
    final LoadbalancerRule newRule = new LoadbalancerRule(NEW_SERVICE_NAME, RULE);
    final Map<String, Object> cache = buildCache();
    Assert.assertFalse(CacheUtils.updateCache(cache, newRule));
    // test cleanup data (null old service name -> full clear)
    final Map<String, Object> changeCache = buildCache();
    final ChangedLoadbalancerRule changedLoadbalancerRule = new ChangedLoadbalancerRule(
        new LoadbalancerRule(null, RULE), new LoadbalancerRule(OLD_SERVICE_NAME, RULE));
    Assert.assertTrue(CacheUtils.updateCache(changeCache, changedLoadbalancerRule));
    Assert.assertTrue(changeCache.isEmpty());
    // There are other service names, not just the old and the new
    final Map<String, Object> cacheMore = buildCache();
    cacheMore.put("otherService", new Object());
    final ChangedLoadbalancerRule moreChangeRule = new ChangedLoadbalancerRule(
        new LoadbalancerRule(NEW_SERVICE_NAME, RULE), new LoadbalancerRule(OLD_SERVICE_NAME, RULE));
    Assert.assertTrue(CacheUtils.updateCache(cacheMore, moreChangeRule));
    Assert.assertEquals(1, cacheMore.size());
}
/**
 * Registers the CORS filter for API, management and API-docs endpoints.
 * CORS configuration is only applied when allowed origins or origin patterns are
 * configured; otherwise the filter is created with an empty source (no CORS headers).
 */
@Bean
public CorsFilter corsFilter() {
    UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
    CorsConfiguration config = jHipsterProperties.getCors();
    if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
        log.debug("Registering CORS filter");
        source.registerCorsConfiguration("/api/**", config);
        source.registerCorsConfiguration("/management/**", config);
        source.registerCorsConfiguration("/v3/api-docs", config);
        source.registerCorsConfiguration("/swagger-ui/**", config);
    }
    return new CorsFilter(source);
}
// CORS is only registered for the configured path patterns; a request to an
// unregistered path must not receive CORS headers even with an Origin header.
@Test
void shouldCorsFilterOnOtherPath() throws Exception {
    props.getCors().setAllowedOrigins(Collections.singletonList("*"));
    props.getCors().setAllowedMethods(Arrays.asList("GET", "POST", "PUT", "DELETE"));
    props.getCors().setAllowedHeaders(Collections.singletonList("*"));
    props.getCors().setMaxAge(1800L);
    props.getCors().setAllowCredentials(true);
    MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();
    mockMvc
        .perform(get("/test/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
        .andExpect(status().isOk())
        .andExpect(header().doesNotExist(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN));
}
/**
 * Upgrades a rule node's configuration json to the node class's current version.
 * Fallback behavior: when the stored configuration is missing or not a json object,
 * or when the upgrade throws AND the old configuration no longer deserializes into
 * the current config class, the node gets the default configuration. If the upgrade
 * throws but the old configuration is still valid for the current class, it is kept
 * unchanged. The configuration version is bumped in every case.
 */
public static void upgradeConfigurationAndVersion(RuleNode node, RuleNodeClassInfo nodeInfo) {
    JsonNode oldConfiguration = node.getConfiguration();
    int configurationVersion = node.getConfigurationVersion();
    int currentVersion = nodeInfo.getCurrentVersion();
    var configClass = nodeInfo.getAnnotation().configClazz();
    if (oldConfiguration == null || !oldConfiguration.isObject()) {
        log.warn("Failed to upgrade rule node with id: {} type: {} fromVersion: {} toVersion: {}. " +
                "Current configuration is null or not a json object. " +
                "Going to set default configuration ... ",
                node.getId(), node.getType(), configurationVersion, currentVersion);
        node.setConfiguration(getDefaultConfig(configClass));
    } else {
        var tbVersionedNode = getTbVersionedNode(nodeInfo);
        try {
            // Remember the queue name embedded in the old config so it can be moved
            // to the dedicated field after a successful upgrade
            JsonNode queueName = oldConfiguration.get(QUEUE_NAME);
            TbPair<Boolean, JsonNode> upgradeResult = tbVersionedNode.upgrade(configurationVersion, oldConfiguration);
            if (upgradeResult.getFirst()) {
                node.setConfiguration(upgradeResult.getSecond());
                if (nodeInfo.getAnnotation().hasQueueName() && queueName != null && queueName.isTextual()) {
                    node.setQueueName(queueName.asText());
                }
            }
        } catch (Exception e) {
            try {
                // Upgrade failed: keep the old configuration if it still matches the current schema
                JacksonUtil.treeToValue(oldConfiguration, configClass);
            } catch (Exception ex) {
                // Old configuration is also invalid for the current schema: reset to defaults.
                // The original upgrade exception 'e' is attached to the log entry.
                log.warn("Failed to upgrade rule node with id: {} type: {} fromVersion: {} toVersion: {}. " +
                        "Going to set default configuration ... ",
                        node.getId(), node.getType(), configurationVersion, currentVersion, e);
                node.setConfiguration(getDefaultConfig(configClass));
            }
        }
    }
    // Version is bumped unconditionally, even when the configuration was left unchanged
    node.setConfigurationVersion(currentVersion);
}
// An old-version configuration that fails upgrade AND no longer matches the current
// config class must be replaced by the default configuration, with the version bumped.
@Test
public void testUpgradeRuleNodeConfigurationWithInvalidConfigAndOldConfigVersion() throws Exception {
    // GIVEN
    var node = new RuleNode();
    var nodeInfo = mock(RuleNodeClassInfo.class);
    var nodeConfigClazz = TbGetEntityDataNodeConfiguration.class;
    var annotation = mock(org.thingsboard.rule.engine.api.RuleNode.class);
    var defaultConfig = JacksonUtil.valueToTree(nodeConfigClazz.getDeclaredConstructor().newInstance().defaultConfiguration());
    when(nodeInfo.getClazz()).thenReturn((Class) TbGetCustomerAttributeNode.class);
    when(nodeInfo.getCurrentVersion()).thenReturn(1);
    when(nodeInfo.getAnnotation()).thenReturn(annotation);
    when(annotation.configClazz()).thenReturn((Class) nodeConfigClazz);
    // missing telemetry field
    String oldConfig = "{\"attrMapping\":{\"alarmThreshold\":\"threshold\"}}";;
    node.setConfiguration(JacksonUtil.toJsonNode(oldConfig));
    // WHEN
    TbNodeUpgradeUtils.upgradeConfigurationAndVersion(node, nodeInfo);
    // THEN
    Assertions.assertThat(node.getConfiguration()).isEqualTo(defaultConfig);
    Assertions.assertThat(node.getConfigurationVersion()).isEqualTo(1);
}
/**
 * Pages diy templates by the given query conditions.
 * Thin delegate to the mapper's paging query.
 */
@Override
public PageResult<DiyTemplateDO> getDiyTemplatePage(DiyTemplatePageReqVO pageReqVO) {
    return diyTemplateMapper.selectPage(pageReqVO);
}
@Test @Disabled // TODO 请修改 null 为需要的值,然后删除 @Disabled 注解 public void testGetDiyTemplatePage() { // mock 数据 DiyTemplateDO dbDiyTemplate = randomPojo(DiyTemplateDO.class, o -> { // 等会查询到 o.setName(null); o.setUsed(null); o.setUsedTime(null); o.setRemark(null); o.setPreviewPicUrls(null); o.setProperty(null); o.setCreateTime(null); }); diyTemplateMapper.insert(dbDiyTemplate); // 测试 name 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setName(null))); // 测试 used 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setUsed(null))); // 测试 usedTime 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setUsedTime(null))); // 测试 remark 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setRemark(null))); // 测试 previewPicUrls 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setPreviewPicUrls(null))); // 测试 property 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setProperty(null))); // 测试 createTime 不匹配 diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setCreateTime(null))); // 准备参数 DiyTemplatePageReqVO reqVO = new DiyTemplatePageReqVO(); reqVO.setName(null); reqVO.setUsed(null); reqVO.setUsedTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28)); reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28)); // 调用 PageResult<DiyTemplateDO> pageResult = diyTemplateService.getDiyTemplatePage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbDiyTemplate, pageResult.getList().get(0)); }
/** Returns the ksql command topic name derived from the given config's internal-topic prefix. */
public static String commandTopic(final KsqlConfig ksqlConfig) {
    return toKsqlInternalTopic(ksqlConfig, KSQL_COMMAND_TOPIC_SUFFIX);
}
// The command topic name is derived from the default service id.
@Test
public void shouldReturnCommandTopic() {
    // Given/When
    final String commandTopic = ReservedInternalTopics.commandTopic(ksqlConfig);
    // Then
    assertThat(commandTopic, is("_confluent-ksql-default__command_topic"));
}
/**
 * Maintains spare host capacity: reports overcommitted hosts and retires them, then
 * checks worst-case host-loss capacity and attempts a mitigation move when no spare
 * capacity remains.
 *
 * @return 1.0 on success (or when maintenance is unnecessary), 0.0 when the node
 *         repository is not working or a needed mitigation could not be executed
 */
@Override
protected double maintain() {
    if ( ! nodeRepository().nodes().isWorking()) return 0.0;
    // Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand.
    if (nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0;
    NodeList allNodes = nodeRepository().nodes().list();
    CapacityChecker capacityChecker = new CapacityChecker(allNodes);
    List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
    metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null);
    retireOvercommitedHosts(allNodes, overcommittedHosts);
    boolean success = true;
    Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
    if (failurePath.isPresent()) {
        // Spare capacity = number of hosts whose simultaneous loss causes failure, minus one
        int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
        if (spareHostCapacity == 0) {
            List<Move> mitigation = findMitigation(failurePath.get());
            if (execute(mitigation, failurePath.get())) {
                // We succeeded or are in the process of taking a step to mitigate.
                // Report with the assumption this will eventually succeed to avoid alerting before we're stuck
                spareHostCapacity++;
            } else {
                success = false;
            }
        }
        metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null);
    }
    return success ? 1.0 : 0.0;
}
// An empty node repository requires no spare-capacity action: no deployments, no retirements.
@Test
public void testEmpty() {
    var tester = new SpareCapacityMaintainerTester();
    tester.maintainer.maintain();
    assertEquals(0, tester.deployer.activations);
    assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
}
/**
 * Splits {@code s} on the precompiled space pattern.
 * Follows {@code Pattern.split} semantics: trailing empty strings are removed,
 * so an all-space input yields an empty array.
 */
public static String[] splitOnSpace(String s) {
    return PATTERN_SPACE.split(s);
}
// A string of only spaces splits to an empty array (Pattern.split drops trailing empties).
@Test
void testSplitOnSpace_onlySpaces() {
    String[] result = StringUtil.splitOnSpace(" ");
    assertArrayEquals(new String[] {}, result);
}
/**
 * Maps each line of {@code right} (report) to its matching 1-based line number in
 * {@code left} (database), using a Myers diff.
 *
 * @return an array of size {@code right.size()} where slot i holds the matching
 *         left line number for right line i+1, or 0 when that line has no match
 *         (0 is also returned for all lines if the diff computation fails)
 */
public int[] findMatchingLines(List<String> left, List<String> right) {
    int[] index = new int[right.size()];
    // Walk the diff path backwards from the end of both sequences
    int dbLine = left.size();
    int reportLine = right.size();
    try {
        PathNode node = new MyersDiff<String>().buildPath(left, right);
        while (node.prev != null) {
            PathNode prevNode = node.prev;
            if (!node.isSnake()) {
                // additions
                reportLine -= (node.j - prevNode.j);
                // removals
                dbLine -= (node.i - prevNode.i);
            } else {
                // matches: each snake step pairs one left line with one right line
                for (int i = node.i; i > prevNode.i; i--) {
                    index[reportLine - 1] = dbLine;
                    reportLine--;
                    dbLine--;
                }
            }
            node = prevNode;
        }
    } catch (DifferentiationFailedException e) {
        // Best effort: log and return whatever mapping was computed so far (possibly all zeros)
        LOG.error("Error finding matching lines", e);
        return index;
    }
    return index;
}
// Lines deleted from the end of the file must not disturb the mapping of the
// surviving report lines, which still map 1:1 onto database lines 1..3.
@Test
public void shouldIgnoreDeletedLinesAtEndOfFile() {
    List<String> database = new ArrayList<>();
    database.add("line - 0");
    database.add("line - 1");
    database.add("line - 2");
    database.add("line - 3");
    database.add("line - 4");
    List<String> report = new ArrayList<>();
    report.add("line - 0");
    report.add("line - 1");
    report.add("line - 2");
    int[] diff = new SourceLinesDiffFinder().findMatchingLines(database, report);
    assertThat(diff).containsExactly(1, 2, 3);
}
/** @return the wrapped EnumSet; null when this writable wraps a null set */
public EnumSet<E> get() {
    return value;
}
// An EnumSetWritable may not wrap null without an element type class; with an
// explicit element type, a null set must survive a serialize/deserialize round
// trip as null.
// FIX: assertEquals arguments were reversed (actual-first) — JUnit expects the
// expected value first, otherwise failure messages are misleading.
@SuppressWarnings("unchecked")
@Test
public void testSerializeAndDeserializeNull() throws IOException {
    boolean gotException = false;
    try {
        new EnumSetWritable<TestEnumSet>(null);
    } catch (RuntimeException e) {
        gotException = true;
    }
    assertTrue(
        "Instantiation of empty EnumSetWritable with no element type class "
            + "provided should throw exception", gotException);
    EnumSetWritable<TestEnumSet> nullFlagWritable =
        new EnumSetWritable<TestEnumSet>(null, TestEnumSet.class);
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable.getClass(), null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
        .readObject(in, null)).get();
    assertEquals(null, read);
}
/**
 * Updates this Spark resource in place from the given descriptor: merges Spark configs
 * and broker properties, and overrides the working dir / broker when present.
 *
 * @param resourceDesc descriptor whose name must match this resource's name
 * @throws DdlException if the new properties attempt to change the Spark master
 */
public void update(ResourceDesc resourceDesc) throws DdlException {
    Preconditions.checkState(name.equals(resourceDesc.getName()));
    Map<String, String> properties = resourceDesc.getProperties();
    if (properties == null) {
        // Nothing to merge.
        return;
    }
    // update spark configs
    if (properties.containsKey(SPARK_MASTER)) {
        // The master is immutable once the resource is created.
        throw new DdlException("Cannot change spark master");
    }
    sparkConfigs.putAll(getSparkConfig(properties));
    // update working dir and broker
    if (properties.containsKey(WORKING_DIR)) {
        workingDir = properties.get(WORKING_DIR);
    }
    if (properties.containsKey(BROKER)) {
        broker = properties.get(BROKER);
        hasBroker = true;
    }
    brokerProperties.putAll(getBrokerProperties(properties));
}
// Updating a copied resource must merge the new Spark configs into the copy only:
// the original keeps its 5 configs and "1g" driver memory, while the copy gains a
// sixth entry and the overridden "2g" driver memory.
@Test
public void testUpdate(@Injectable BrokerMgr brokerMgr, @Mocked GlobalStateMgr globalStateMgr)
        throws UserException {
    new Expectations() {
        {
            globalStateMgr.getBrokerMgr();
            result = brokerMgr;
            brokerMgr.containsBroker(broker);
            result = true;
        }
    };
    Analyzer analyzer = new Analyzer(Analyzer.AnalyzerVisitor.getInstance());
    new Expectations() {
        {
            globalStateMgr.getAnalyzer();
            result = analyzer;
        }
    };
    properties.put("spark.master", "yarn");
    properties.put("spark.submit.deployMode", "cluster");
    properties.put("spark.driver.memory", "1g");
    properties.put("spark.hadoop.yarn.resourcemanager.address", "127.0.0.1:9999");
    properties.put("spark.hadoop.fs.defaultFS", "hdfs://127.0.0.1:10000");
    CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
    com.starrocks.sql.analyzer.Analyzer.analyze(stmt, connectContext);
    SparkResource resource = (SparkResource) Resource.fromStmt(stmt);
    SparkResource copiedResource = resource.getCopiedResource();
    Map<String, String> newProperties = Maps.newHashMap();
    newProperties.put("spark.executor.memory", "1g");
    newProperties.put("spark.driver.memory", "2g");
    ResourceDesc resourceDesc = new ResourceDesc(name, newProperties);
    copiedResource.update(resourceDesc);
    Map<String, String> map = copiedResource.getSparkConfigs();
    Assert.assertEquals(5, resource.getSparkConfigs().size());
    Assert.assertEquals("1g", resource.getSparkConfigs().get("spark.driver.memory"));
    Assert.assertEquals(6, map.size());
    Assert.assertEquals("2g", copiedResource.getSparkConfigs().get("spark.driver.memory"));
}
/**
 * Registers a payload class under the given type name.
 *
 * <p>Abstract classes are silently skipped. Duplicate registrations of the same type
 * name fail fast.
 *
 * @param type  payload type name used as the registry key
 * @param clazz concrete payload class to register
 * @throws RuntimeException if the type name is already registered
 */
static void register(String type, Class<?> clazz) {
    if (Modifier.isAbstract(clazz.getModifiers())) {
        return;
    }
    // putIfAbsent makes the duplicate check and the insert a single atomic step,
    // closing the race window the separate containsKey()/put() pair had when
    // registrations happen concurrently.
    if (REGISTRY_REQUEST.putIfAbsent(type, clazz) != null) {
        throw new RuntimeException(String.format("Fail to register, type:%s ,clazz:%s ", type, clazz.getName()));
    }
}
// Registering a type name a second time must fail fast with a RuntimeException.
// NOTE(review): assumes "ErrorResponse" was already registered elsewhere (e.g. at
// class-initialization time) — confirm against PayloadRegistry's static setup.
@Test
void testRegisterDuplicated() {
    assertThrows(RuntimeException.class, () -> {
        PayloadRegistry.register("ErrorResponse", ErrorResponse.class);
    });
}
/**
 * Sends the job-end HTTP notification for the local runner, retrying on failure.
 *
 * <p>A response code other than 200 counts as a failure. After each failed attempt the
 * thread sleeps for the configured retry interval; {@code configureForRetry()} decides
 * whether another attempt is allowed. Returns silently if no notification is configured.
 */
public static void localRunnerNotification(JobConf conf, JobStatus status) {
    JobEndStatusInfo notification = createNotification(conf, status);
    if (notification != null) {
        do {
            try {
                int code = httpNotification(notification.getUri(),
                    notification.getTimeout());
                if (code != 200) {
                    throw new IOException("Invalid response status code: " + code);
                } else {
                    // Success: stop retrying.
                    break;
                }
            } catch (IOException ioex) {
                LOG.error("Notification error [" + notification.getUri() + "]", ioex);
            } catch (Exception ex) {
                // Catch-all so an unexpected error still goes through the retry loop.
                LOG.error("Notification error [" + notification.getUri() + "]", ex);
            }
            try {
                // Back off before the next attempt.
                Thread.sleep(notification.getRetryInterval());
            } catch (InterruptedException iex) {
                LOG.error("Notification retry error [" + notification + "]", iex);
            }
        } while (notification.configureForRetry());
    }
}
// With retryAttempts retries configured against an endpoint that always fails,
// the servlet must be hit exactly (retryAttempts + 1) times: the initial attempt
// plus one call per retry.
@Test
public void testLocalJobRunnerRetryCount() throws InterruptedException {
    int retryAttempts = 3;
    JobStatus jobStatus = createTestJobStatus(
        "job_20130313155005308_0001", JobStatus.SUCCEEDED);
    JobConf jobConf = createTestJobConf(
        new Configuration(), retryAttempts, baseUrl + "fail");
    JobEndNotifier.localRunnerNotification(jobConf, jobStatus);
    // Validate params
    assertEquals(retryAttempts + 1, FailServlet.calledTimes);
}
/**
 * Builds a {@link PodTemplateSpec} for a workload, merging defaults with the values from
 * the user-supplied {@code template} (labels/annotations are merged; most scalar fields
 * from the template win when set, otherwise a default applies).
 *
 * @param workloadName            name used as the service account name
 * @param labels                  base labels for the pod metadata
 * @param template                user customization; may be null, in which case defaults apply
 * @param defaultPodLabels        labels merged beneath the template labels
 * @param podAnnotations          annotations merged beneath the template annotations
 * @param affinity                affinity applied to the pod (template affinity is not consulted here)
 * @param initContainers          init containers for the pod
 * @param containers              main containers for the pod
 * @param volumes                 pod volumes
 * @param defaultImagePullSecrets pull secrets used when the template defines none
 * @param podSecurityContext      security context applied to the pod
 */
public static PodTemplateSpec createPodTemplateSpec(
        String workloadName,
        Labels labels,
        PodTemplate template,
        Map<String, String> defaultPodLabels,
        Map<String, String> podAnnotations,
        Affinity affinity,
        List<Container> initContainers,
        List<Container> containers,
        List<Volume> volumes,
        List<LocalObjectReference> defaultImagePullSecrets,
        PodSecurityContext podSecurityContext
) {
    return new PodTemplateSpecBuilder()
            .withNewMetadata()
                .withLabels(labels.withAdditionalLabels(Util.mergeLabelsOrAnnotations(defaultPodLabels, TemplateUtils.labels(template))).toMap())
                .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, TemplateUtils.annotations(template)))
            .endMetadata()
            .withNewSpec()
                .withServiceAccountName(workloadName)
                .withEnableServiceLinks(template != null ? template.getEnableServiceLinks() : null)
                .withAffinity(affinity)
                .withInitContainers(initContainers)
                .withContainers(containers)
                .withVolumes(volumes)
                .withTolerations(template != null && template.getTolerations() != null ? template.getTolerations() : null)
                // NOTE(review): if the template is non-null but its grace period is an unset
                // boxed Integer, the (long) cast would NPE on unboxing — confirm the getter's
                // type/defaulting behavior.
                .withTerminationGracePeriodSeconds(template != null ? (long) template.getTerminationGracePeriodSeconds() : 30L)
                .withImagePullSecrets(imagePullSecrets(template, defaultImagePullSecrets))
                .withSecurityContext(podSecurityContext)
                .withPriorityClassName(template != null ? template.getPriorityClassName() : null)
                .withSchedulerName(template != null && template.getSchedulerName() != null ? template.getSchedulerName() : "default-scheduler")
                .withHostAliases(template != null ? template.getHostAliases() : null)
                .withTopologySpreadConstraints(template != null ? template.getTopologySpreadConstraints() : null)
            .endSpec()
            .build();
}
// With a fully populated PodTemplate: labels/annotations from the template are merged on
// top of the defaults, template scalars (priority class, scheduler, tolerations, grace
// period, pull secrets, host aliases, topology constraints) win, while affinity and the
// pod security context come from the method arguments and the template values for those
// two are ignored.
@Test
public void testCreatePodTemplateSpecWithTemplate() {
    PodTemplateSpec pod = WorkloadUtils.createPodTemplateSpec(
            NAME,
            LABELS,
            new PodTemplateBuilder()
                    .withNewMetadata()
                        .withLabels(Map.of("label-3", "value-3", "label-4", "value-4"))
                        .withAnnotations(Map.of("anno-1", "value-1", "anno-2", "value-2"))
                    .endMetadata()
                    .withEnableServiceLinks(false)
                    .withAffinity(new Affinity()) // => should be ignored
                    .withImagePullSecrets(List.of(new LocalObjectReference("some-other-pull-secret")))
                    .withPriorityClassName("my-priority-class")
                    .withHostAliases(DEFAULT_HOST_ALIAS)
                    .withTolerations(DEFAULT_TOLERATION)
                    .withTerminationGracePeriodSeconds(15)
                    .withSecurityContext(new PodSecurityContextBuilder().withRunAsUser(0L).build()) // => should be ignored
                    .withTopologySpreadConstraints(DEFAULT_TOPOLOGY_SPREAD_CONSTRAINT)
                    .withSchedulerName("my-scheduler")
                    .build(),
            Map.of("default-label", "default-value"),
            Map.of("extra", "annotations"),
            DEFAULT_AFFINITY,
            List.of(new ContainerBuilder().withName("init-container").build()),
            List.of(new ContainerBuilder().withName("container").build()),
            VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false),
            List.of(new LocalObjectReference("some-pull-secret")),
            DEFAULT_POD_SECURITY_CONTEXT
    );

    assertThat(pod.getMetadata().getLabels(), is(LABELS.withAdditionalLabels(Map.of("default-label", "default-value", "label-3", "value-3", "label-4", "value-4")).toMap()));
    assertThat(pod.getMetadata().getAnnotations(), is(Map.of("extra", "annotations", "anno-1", "value-1", "anno-2", "value-2")));
    assertThat(pod.getSpec().getServiceAccountName(), is(NAME));
    assertThat(pod.getSpec().getEnableServiceLinks(), is(false));
    assertThat(pod.getSpec().getAffinity(), is(DEFAULT_AFFINITY));
    assertThat(pod.getSpec().getInitContainers().size(), is(1));
    assertThat(pod.getSpec().getInitContainers().get(0).getName(), is("init-container"));
    assertThat(pod.getSpec().getContainers().size(), is(1));
    assertThat(pod.getSpec().getContainers().get(0).getName(), is("container"));
    assertThat(pod.getSpec().getVolumes(), is(VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false)));
    assertThat(pod.getSpec().getTolerations(), is(List.of(DEFAULT_TOLERATION)));
    assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(15L));
    assertThat(pod.getSpec().getImagePullSecrets(), is(List.of(new LocalObjectReference("some-other-pull-secret"))));
    assertThat(pod.getSpec().getSecurityContext(), is(DEFAULT_POD_SECURITY_CONTEXT));
    assertThat(pod.getSpec().getPriorityClassName(), is("my-priority-class"));
    assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler"));
    assertThat(pod.getSpec().getHostAliases(), is(List.of(DEFAULT_HOST_ALIAS)));
    assertThat(pod.getSpec().getTopologySpreadConstraints(), is(List.of(DEFAULT_TOPOLOGY_SPREAD_CONSTRAINT)));
}
/**
 * Creates one meter per log level (plus an aggregate "all" meter) in the metric
 * registry, then starts the underlying appender.
 */
@Override
public void start() {
    this.all = registry.meter(name(getName(), "all"));
    this.trace = registry.meter(name(getName(), "trace"));
    this.debug = registry.meter(name(getName(), "debug"));
    this.info = registry.meter(name(getName(), "info"));
    this.warn = registry.meter(name(getName(), "warn"));
    this.error = registry.meter(name(getName(), "error"));
    // Meters must exist before the appender starts receiving events.
    super.start();
}
// An appender constructed with a registry *name* must resolve it through
// SharedMetricRegistries and record events into that shared registry.
@Test
public void usesSharedRegistries() {
    String registryName = "registry";
    SharedMetricRegistries.add(registryName, registry);

    final InstrumentedAppender shared = new InstrumentedAppender(registryName);
    shared.start();
    when(event.getLevel()).thenReturn(Level.INFO);
    shared.doAppend(event);

    assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount())
        .isEqualTo(1);
}
/** Returns all scope variables; delegates with offset 0 and no excluded variables. */
@Override
public Map<String, String> getAllVariables() {
    return internalGetAllVariables(0, Collections.emptySet());
}
// Excluding the only variable a ProcessMetricGroup defines (the host) must yield an
// empty variable map.
@Test
void testGetAllVariablesWithExclusions() {
    MetricRegistry registry = NoOpMetricRegistry.INSTANCE;

    AbstractMetricGroup<?> group = new ProcessMetricGroup(registry, "host");
    assertThat(group.getAllVariables(-1, Collections.singleton(ScopeFormat.SCOPE_HOST)))
            .isEmpty();
}
/**
 * Resolves a child node of this topic: "name" and "id" are exposed as leaf nodes with
 * the topic's name and id; any other child is interpreted as a partition number whose
 * registration is rendered as a leaf. Returns null when the child does not exist.
 */
@Override
public MetadataNode child(String name) {
    switch (name) {
        case "name":
            return new MetadataLeafNode(image.name());
        case "id":
            return new MetadataLeafNode(image.id().toString());
        default:
            final int partitionId;
            try {
                partitionId = Integer.parseInt(name);
            } catch (NumberFormatException e) {
                // Not a partition number — no such child.
                return null;
            }
            PartitionRegistration registration = image.partitions().get(partitionId);
            return registration == null ? null : new MetadataLeafNode(registration.toString());
    }
}
// The "name" child must exist and be a MetadataLeafNode.
@Test
public void testNameChild() {
    MetadataNode child = NODE.child("name");
    assertNotNull(child);
    assertEquals(MetadataLeafNode.class, child.getClass());
}
/** Returns the raw expiration claim value as a string. */
public String getExpiration() {
    return expiration;
}
// The expiration claim parsed from a Cognito User Pools request must match the raw
// EXP_TIME string, parse with the token date formatter, and sit exactly one hour
// after the issue time.
@Test
void claims_dateParse_issueTime() {
    try {
        AwsProxyRequest req = new AwsProxyRequestBuilder().fromJsonString(USER_POOLS_REQUEST).build();
        assertEquals(EXP_TIME, req.getRequestContext().getAuthorizer().getClaims().getExpiration());
        assertNotNull(req.getRequestContext().getAuthorizer().getClaims().getExpiration());
        ZonedDateTime expTime = ZonedDateTime.from(TOKEN_DATE_FORMATTER.parse(EXP_TIME));
        ZonedDateTime issueTime = ZonedDateTime.from(TOKEN_DATE_FORMATTER.parse(ISSUE_TIME));
        assertEquals(expTime, ZonedDateTime.from(TOKEN_DATE_FORMATTER.parse(req.getRequestContext().getAuthorizer().getClaims().getExpiration())));
        assertEquals(expTime, issueTime.plusHours(1));
    } catch (IOException e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Creates a {@code UBinary} node for the given binary operator and operands.
 *
 * @throws IllegalArgumentException if {@code binaryOp} is not in {@code OP_CODES}
 */
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
    checkArgument(
        OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
    return new AutoValue_UBinary(binaryOp, lhs, rhs);
}
// "4 << 17" must unify with and inline back from a LEFT_SHIFT UBinary of the two literals.
@Test
public void leftShift() {
    assertUnifiesAndInlines(
        "4 << 17", UBinary.create(Kind.LEFT_SHIFT, ULiteral.intLit(4), ULiteral.intLit(17)));
}
@Override // Camel calls this method if the endpoint isSynchronous(), as the // KafkaEndpoint creates a SynchronousDelegateProducer for it public void process(Exchange exchange) throws Exception { // is the message body a list or something that contains multiple values Message message = exchange.getIn(); if (transactionId != null) { startKafkaTransaction(exchange); } if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) { processIterableSync(exchange, message); } else { processSingleMessageSync(exchange, message); } }
// When an aggregated exchange carries a KafkaConstants.OVERRIDE_TOPIC header on every
// nested exchange, each record must be sent to its own overridden topic (not the
// endpoint topic and not the outer override), and record metadata must be attached
// per aggregated exchange.
@Test
public void processSendsMessageWithListOfExchangesWithOverrideTopicHeaderOnEveryExchange() throws Exception {
    endpoint.getConfiguration().setTopic("someTopic");
    Mockito.when(exchange.getIn()).thenReturn(in);
    Mockito.when(exchange.getMessage()).thenReturn(in);

    // we set the initial topic
    in.setHeader(KafkaConstants.OVERRIDE_TOPIC, "anotherTopic");
    in.setHeader(KafkaConstants.KEY, "someKey");

    // we add our exchanges in order to aggregate
    final List<Exchange> nestedExchanges =
        createListOfExchangesWithTopics(Arrays.asList("overridenTopic1", "overridenTopic2", "overridenTopic3"));

    // aggregate
    final Exchange finalAggregatedExchange =
        aggregateExchanges(nestedExchanges, new GroupedExchangeAggregationStrategy());

    in.setBody(finalAggregatedExchange.getIn().getBody());
    in.setHeaders(finalAggregatedExchange.getIn().getHeaders());

    producer.process(exchange);

    // assert results
    verifySendMessages(Arrays.asList("overridenTopic1", "overridenTopic2", "overridenTopic3"), null);
    assertRecordMetadataExists(3);
    assertRecordMetadataExistsForEachAggregatedExchange();
}
/**
 * Resolves the Assertion Consumer Service URL for an authentication request.
 *
 * <p>Resolution order: (1) an explicit ACS URL in the AuthnRequest; (2) by index into the
 * metadata endpoints when an ACS index is given; (3) the single endpoint when only one is
 * declared; (4) the endpoint marked as default. Fails when metadata declares no ACS
 * endpoints, the index is out of bounds, or no default exists.
 *
 * @throws SamlValidationException if no usable ACS endpoint can be determined
 */
public void resolveAssertionConsumerService(AuthenticationRequest authenticationRequest) throws SamlValidationException {
    // set URL if set in authnRequest
    final String authnAcsURL = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceURL();
    if (authnAcsURL != null) {
        authenticationRequest.setAssertionConsumerURL(authnAcsURL);
        return;
    }
    // search url from metadata endpoints
    final Integer authnAcsIdx = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceIndex();
    List<Endpoint> endpoints = authenticationRequest.getConnectionEntity().getRoleDescriptors().get(0).getEndpoints(AssertionConsumerService.DEFAULT_ELEMENT_NAME);
    if (endpoints.isEmpty()) {
        throw new SamlValidationException("Authentication: Assertion Consumer Service not found in metadata");
    }
    if (authnAcsIdx != null && endpoints.size() <= authnAcsIdx) {
        throw new SamlValidationException("Authentication: Assertion Consumer Index is out of bounds");
    }
    // TODO: check if this statement is correct
    // NOTE(review): a single endpoint is used even when an explicit index was supplied;
    // verify this matches the SAML metadata spec's expectations for indexed endpoints.
    if (endpoints.size() == 1) {
        authenticationRequest.setAssertionConsumerURL(endpoints.get(0).getLocation());
        return;
    }
    if (authnAcsIdx == null) {
        // No index given: fall back to the endpoint flagged isDefault in metadata.
        AssertionConsumerService defaultAcs = endpoints.stream()
                .filter(e -> e instanceof AssertionConsumerService)
                .map(acs -> (AssertionConsumerService) acs)
                .filter(IndexedEndpoint::isDefault)
                .findAny()
                .orElse(null);
        if (defaultAcs == null) {
            throw new SamlValidationException("Authentication: There is no default AssertionConsumerService");
        }
        authenticationRequest.setAssertionConsumerURL(defaultAcs.getLocation());
        return;
    }
    authenticationRequest.setAssertionConsumerURL(endpoints.get(authnAcsIdx).getLocation());
}
// With multiple ACS endpoints, no index in the request, and no endpoint marked default,
// resolution must fail with the "no default AssertionConsumerService" error.
@Test
void resolveAcsUrlWithoutIndexInMultiAcsNoDefaultMetadata() {
    AuthnRequest authnRequest = OpenSAMLUtils.buildSAMLObject(AuthnRequest.class);
    AuthenticationRequest authenticationRequest = new AuthenticationRequest();
    authenticationRequest.setAuthnRequest(authnRequest);
    authenticationRequest.setConnectionEntity(MetadataParser.readMetadata(stubsMultiAcsMetadataFileWithoutDefault, CONNECTION_ENTITY_ID));
    Exception exception = assertThrows(SamlValidationException.class, () ->
        assertionConsumerServiceUrlService.resolveAssertionConsumerService(authenticationRequest));
    assertEquals("Authentication: There is no default AssertionConsumerService", exception.getMessage());
}
/** Returns the current count of failed getNodeToLabels retrievals (exposed for tests). */
@VisibleForTesting
public int getNodeToLabelsFailedRetrieved() {
    return numGetNodeToLabelsFailedRetrieved.value();
}
// A getNodeToLabels call against a bad subcluster must bump the failure counter by one.
@Test
public void testGetNodeToLabelsFailed() {
    long totalBadBefore = metrics.getNodeToLabelsFailedRetrieved();
    badSubCluster.getNodeToLabels();
    Assert.assertEquals(totalBadBefore + 1, metrics.getNodeToLabelsFailedRetrieved());
}
/** Returns the shared singleton {@code UUIDUtils} instance. */
public static UUIDUtils getInstance() {
    return ID_WORKER_UTILS;
}
// Verifies the private (workerId, datacenterId, sequence) constructor rejects negative
// worker and datacenter ids with the expected messages. The original version passed
// silently when no exception was thrown; an explicit AssertionError now guards that case
// (it is not caught by the InvocationTargetException handler, so it fails the test).
@Test
public void testConstructor() throws Exception {
    Class<?> uUIDUtilsClass = UUIDUtils.getInstance().getClass();
    Class<?>[] p = {long.class, long.class, long.class};
    Constructor<?> constructor = uUIDUtilsClass.getDeclaredConstructor(p);
    constructor.setAccessible(true);
    try {
        constructor.newInstance(-1L, 10L, 10L);
        throw new AssertionError("Expected InvocationTargetException for negative worker id");
    } catch (InvocationTargetException ex) {
        assertTrue(ex.getCause().getMessage().startsWith("worker Id can't be greater than"));
    }
    try {
        constructor.newInstance(10L, -1L, 10L);
        throw new AssertionError("Expected InvocationTargetException for negative datacenter id");
    } catch (InvocationTargetException ex) {
        assertTrue(ex.getCause().getMessage().startsWith("datacenter Id can't be greater than"));
    }
}
/** Resolves the SQL type of an expression, with no lambda type mappings in scope. */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
// Subscripting an ARRAY<INTEGER> field inside a STRUCT column (COL0->IN0[1]) must
// resolve to INTEGER.
@Test
public void shouldEvaluateTypeForArrayReferenceInStruct() {
    // Given:
    final SqlStruct inner = SqlTypes
        .struct()
        .field("IN0", SqlTypes.array(SqlTypes.INTEGER))
        .build();
    final LogicalSchema schema = LogicalSchema.builder()
        .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING)
        .valueColumn(COL0, inner)
        .build();
    expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry);
    final Expression structRef = new DereferenceExpression(
        Optional.empty(),
        new UnqualifiedColumnReferenceExp(COL0),
        "IN0"
    );
    final Expression expression = new SubscriptExpression(structRef, new IntegerLiteral(1));

    // When:
    final SqlType result = expressionTypeManager.getExpressionSqlType(expression);

    // Then:
    assertThat(result, is(SqlTypes.INTEGER));
}
/** This id is already local, so the conversion returns the instance itself. */
@Override
public LocalId toLocalId() {
    return this;
}
// toLocalId() on a LocalPredictionId must return the same (equal) identity.
@Test
void toLocalId() {
    LocalPredictionId localPredictionId = new LocalPredictionId(fileName, name);
    LocalId retrieved = localPredictionId.toLocalId();
    assertThat(retrieved).isEqualTo(localPredictionId);
}
/**
 * Extracts frequent n-grams (collocations) of size 1..maxNGramSize from the sentences.
 *
 * <p>Candidate n-grams of size n are only counted when both of their (n-1)-gram
 * sub-sequences already passed the frequency threshold (an Apriori-style pruning).
 * Single-token punctuation and n-grams that start/end with, or consist entirely of,
 * English stop words are filtered out.
 *
 * @param sentences    tokenized sentences
 * @param maxNGramSize largest n-gram size to extract
 * @param minFrequency minimum occurrence count for an n-gram to be kept
 * @return one array per n-gram size (index 0 is a placeholder and stays empty),
 *         each sorted in descending order
 */
public static NGram[][] of(Collection<String[]> sentences, int maxNGramSize, int minFrequency) {
    ArrayList<Set<NGram>> features = new ArrayList<>(maxNGramSize + 1);
    // features.get(0) is a sentinel so that features.get(n) holds the size-n grams.
    Set<NGram> feature = new HashSet<>();
    features.add(feature);
    EnglishPunctuations punctuations = EnglishPunctuations.getInstance();
    for (int n = 1; n <= maxNGramSize; n++) {
        // Count candidate n-grams across all sentences.
        Map<smile.nlp.NGram, MutableInt> candidates = new HashMap<>();
        for (String[] sentence : sentences) {
            for (int i = 0; i <= sentence.length - n; i++) {
                smile.nlp.NGram ngram = new smile.nlp.NGram(Arrays.copyOfRange(sentence, i, i+n));
                boolean add = false;
                if (n == 1) {
                    add = true;
                } else {
                    // Apriori pruning: both (n-1)-gram halves must already be frequent.
                    NGram initialGram = new NGram(Arrays.copyOfRange(sentence, i, i+n-1), 0);
                    NGram finalGram = new NGram(Arrays.copyOfRange(sentence, i+1, i+n), 0);
                    if (feature.contains(initialGram) && feature.contains(finalGram)) {
                        add = true;
                    }
                }
                if (add) {
                    MutableInt count = candidates.get(ngram);
                    if (count == null) {
                        candidates.put(ngram, new MutableInt(1));
                    } else {
                        count.increment();
                    }
                }
            }
        }
        // Keep candidates meeting the frequency threshold; drop bare punctuation tokens.
        feature = new HashSet<>();
        features.add(feature);
        for (Map.Entry<smile.nlp.NGram, MutableInt> entry : candidates.entrySet()) {
            MutableInt count = entry.getValue();
            if (count.value >= minFrequency) {
                smile.nlp.NGram ngram = entry.getKey();
                if (ngram.words.length == 1 && punctuations.contains(ngram.words[0])) {
                    continue;
                }
                feature.add(new NGram(ngram.words, count.value));
            }
        }
    }
    // filter out stop words
    EnglishStopWords stopWords = EnglishStopWords.DEFAULT;
    return features.stream().map(ngrams -> {
        NGram[] collocations = ngrams.stream().filter(ngram -> {
            // An n-gram is considered a stop-word gram only if it starts or ends with a
            // stop word, or if every word in it is a stop word.
            boolean stopWord = true;
            String[] words = ngram.words;
            if (!stopWords.contains(words[0]) && !stopWords.contains(words[words.length - 1])) {
                for (String word : words) {
                    if (!stopWords.contains(word)) {
                        stopWord = false;
                        break;
                    }
                }
            }
            return !stopWord;
        }).toArray(NGram[]::new);
        // Descending order: most frequent collocations first.
        Arrays.sort(collocations, Collections.reverseOrder());
        return collocations;
    }).toArray(NGram[][]::new);
}
// End-to-end n-gram extraction over the Turing test corpus: with maxNGramSize=4 and
// minFrequency=4 the result has 5 buckets (index 0 is the empty sentinel), with the
// exact expected counts per n-gram size.
@Test
public void testExtract() throws IOException {
    System.out.println("n-gram extraction");

    String text = new String(Files.readAllBytes(smile.util.Paths.getTestData("text/turing.txt")));

    PorterStemmer stemmer = new PorterStemmer();
    SimpleTokenizer tokenizer = new SimpleTokenizer();
    ArrayList<String[]> sentences = new ArrayList<>();
    for (String paragraph : SimpleParagraphSplitter.getInstance().split(text)) {
        for (String s : SimpleSentenceSplitter.getInstance().split(paragraph)) {
            String[] sentence = tokenizer.split(s);
            for (int i = 0; i < sentence.length; i++) {
                sentence[i] = stemmer.stripPluralParticiple(sentence[i]).toLowerCase();
            }
            sentences.add(sentence);
        }
    }

    NGram[][] result = NGram.of(sentences, 4, 4);
    assertEquals(5, result.length);
    for (NGram[] ngrams : result) {
        for (NGram ngram : ngrams) {
            System.out.println(ngram);
        }
        System.out.println();
    }
    assertEquals(0, result[0].length);
    assertEquals(331, result[1].length);
    assertEquals(16, result[2].length);
    assertEquals(7, result[3].length);
    assertEquals(0, result[4].length);
}
/**
 * Computes the mean of every column: the column sums divided by the row count {@code m}.
 *
 * @return a fresh array of {@code n} per-column means
 */
public float[] colMeans() {
    final float[] means = colSums();
    for (int col = 0; col < n; col++) {
        means[col] /= m;
    }
    return means;
}
// Column means of a fixed 3x3 matrix must match precomputed values within 1e-7.
@Test
public void testColMeans() {
    System.out.println("colMeans");
    float[][] A = {
        { 0.7220180f, 0.07121225f, 0.6881997f},
        {-0.2648886f, -0.89044952f, 0.3700456f},
        {-0.6391588f, 0.44947578f, 0.6240573f}
    };
    float[] r = {-0.06067647f, -0.12325383f, 0.56076753f};

    float[] result = Matrix.of(A).colMeans();
    for (int i = 0; i < r.length; i++) {
        assertEquals(result[i], r[i], 1E-7f);
    }
}
/**
 * Checks whether the given placement constraint can be satisfied on the given node.
 *
 * <p>A null constraint is trivially satisfied. Single constraints are first normalized
 * via {@link SingleConstraintTransformer}; And/Or composites are dispatched to their
 * dedicated checks. Other composite types are not yet supported.
 *
 * @return true if the constraint is satisfied on {@code node}
 * @throws InvalidAllocationTagsQueryException for unsupported constraint types or
 *         failed tag queries
 */
private static boolean canSatisfyConstraints(ApplicationId appId,
    PlacementConstraint constraint, SchedulerNode node,
    AllocationTagsManager atm,
    Optional<DiagnosticsCollector> dcOpt)
    throws InvalidAllocationTagsQueryException {
    if (constraint == null) {
        LOG.debug("Constraint is found empty during constraint validation for"
            + " app:{}", appId);
        return true;
    }

    // If this is a single constraint, transform to SingleConstraint
    SingleConstraintTransformer singleTransformer =
        new SingleConstraintTransformer(constraint);
    constraint = singleTransformer.transform();
    AbstractConstraint sConstraintExpr = constraint.getConstraintExpr();

    // TODO handle other type of constraints, e.g CompositeConstraint
    if (sConstraintExpr instanceof SingleConstraint) {
        SingleConstraint single = (SingleConstraint) sConstraintExpr;
        return canSatisfySingleConstraint(appId, single, node, atm, dcOpt);
    } else if (sConstraintExpr instanceof And) {
        And and = (And) sConstraintExpr;
        return canSatisfyAndConstraint(appId, and, node, atm, dcOpt);
    } else if (sConstraintExpr instanceof Or) {
        Or or = (Or) sConstraintExpr;
        return canSatisfyOrConstraint(appId, or, node, atm, dcOpt);
    } else {
        throw new InvalidAllocationTagsQueryException(
            "Unsupported type of constraint: "
                + sConstraintExpr.getClass().getSimpleName());
    }
}
// Anti-affinity: once a container tagged "hbase-m" is placed on node 0, placing the
// anti-affine source tag must fail on node 0 and succeed on every other node.
@Test
public void testNodeAntiAffinityAssignment()
    throws InvalidAllocationTagsQueryException {
    PlacementConstraintManagerService pcm =
        new MemoryPlacementConstraintManager();
    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
    // Register App1 with anti-affinity constraint map
    pcm.registerApplication(appId1, constraintMap2);
    /**
     * place container:
     * Node0:123 (Rack1):
     * container_app1_1 (hbase-m)
     */
    RMNode n0_r1 = rmNodes.get(0);
    RMNode n1_r1 = rmNodes.get(1);
    RMNode n2_r2 = rmNodes.get(2);
    RMNode n3_r2 = rmNodes.get(3);
    SchedulerNode schedulerNode0 = newSchedulerNode(n0_r1.getHostName(),
        n0_r1.getRackName(), n0_r1.getNodeID());
    SchedulerNode schedulerNode1 = newSchedulerNode(n1_r1.getHostName(),
        n1_r1.getRackName(), n1_r1.getNodeID());
    SchedulerNode schedulerNode2 = newSchedulerNode(n2_r2.getHostName(),
        n2_r2.getRackName(), n2_r2.getNodeID());
    SchedulerNode schedulerNode3 = newSchedulerNode(n3_r2.getHostName(),
        n3_r2.getRackName(), n3_r2.getNodeID());
    // 1 Containers on node 0 with allocationTag 'hbase-m'
    ContainerId hbase_m = ContainerId
        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
    tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
    // 'spark' placement on Node0 should now FAIL
    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm));
    // SUCCEED on the rest of the nodes
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode1, pcm, tm));
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode2, pcm, tm));
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm));
}
/**
 * Serializes a single-column row by unwrapping its only value and delegating to the
 * inner serializer. A null row serializes to null.
 */
@Override
public byte[] serialize(final String topic, final List<?> values) {
    if (values == null) {
        return null;
    }
    return inner.serialize(topic, extractOnlyColumn(values, topic));
}
// Serializing a value of the wrong column type must raise a SerializationException
// naming both the expected and the actual type.
@Test
public void shouldThrowIfWrongType() {
    // Then:
    final Exception e = assertThrows(
        SerializationException.class,
        () -> serializer.serialize("t", ImmutableList.of(12))
    );

    // Then:
    assertThat(e.getMessage(), is("value does not match expected type. "
        + "expected: String, but got: Integer"));
}
/** Looks up a function by name in the default schema. */
public SchemaObject findFunction(String functionName) {
    return getDefaultSchema().findFunction(functionName);
}
// A function registered via a CREATE FUNCTION statement must be findable by name.
@Test
public void testFindFunction() {
    SchemaRepository repository = new SchemaRepository(JdbcConstants.MYSQL);
    SQLCreateFunctionStatement stmt = new SQLCreateFunctionStatement();
    String funcName = "Test";
    stmt.setName(new SQLIdentifierExpr(funcName));
    repository.acceptCreateFunction(stmt);
    SchemaObject schemaObject = repository.findFunction(funcName);
    assertNotNull(schemaObject);
}
/**
 * Applies the computed per-column forward-index/dictionary operations to the segment.
 *
 * <p>For each column the operations are executed in order; several operations verify
 * their own postconditions (e.g. a dictionary column must still have a dictionary
 * after its forward index is rebuilt) and throw IllegalStateException when violated.
 * Forward-index deletion itself is deferred (the column is only remembered in
 * {@code _tmpForwardIndexColumns}) so other handlers that still need the forward
 * index can run first.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
    Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
    if (columnOperationsMap.isEmpty()) {
        // Nothing to do for this segment.
        return;
    }

    for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
        String column = entry.getKey();
        List<Operation> operations = entry.getValue();
        for (Operation operation : operations) {
            switch (operation) {
                case DISABLE_FORWARD_INDEX:
                    // Deletion of the forward index will be handled outside the index handler to ensure that other index
                    // handlers that need the forward index to construct their own indexes will have it available.
                    _tmpForwardIndexColumns.add(column);
                    break;
                case ENABLE_FORWARD_INDEX:
                    ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
                    if (columnMetadata.hasDictionary()) {
                        // Postcondition: rebuilding must not have dropped the dictionary.
                        if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
                            throw new IllegalStateException(String.format(
                                "Dictionary should still exist after rebuilding forward index for dictionary column: %s",
                                column));
                        }
                    } else {
                        // Postcondition: a raw column must not have gained a dictionary.
                        if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
                            throw new IllegalStateException(
                                String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                                    column));
                        }
                    }
                    break;
                case DISABLE_DICTIONARY:
                    // If the forward index is also being disabled, drop the dictionary directly;
                    // otherwise rewrite the forward index into its raw (no-dictionary) form.
                    Set<String> newForwardIndexDisabledColumns =
                        FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(),
                            StandardIndexes.forward(), _fieldIndexConfigs);
                    if (newForwardIndexDisabledColumns.contains(column)) {
                        removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
                        if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
                            throw new IllegalStateException(
                                String.format("Dictionary should not exist after disabling dictionary for column: %s",
                                    column));
                        }
                    } else {
                        disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
                    }
                    break;
                case ENABLE_DICTIONARY:
                    createDictBasedForwardIndex(column, segmentWriter);
                    if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
                        throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
                    }
                    break;
                case CHANGE_INDEX_COMPRESSION_TYPE:
                    rewriteForwardIndexForCompressionChange(column, segmentWriter);
                    break;
                default:
                    throw new IllegalStateException("Unsupported operation for column " + column);
            }
        }
    }
}
// For each raw sorted column: removing it from the no-dictionary set and re-running the
// handler must create a dictionary and keep the forward index intact; only hasDictionary
// and dictionaryElementSize may change in the column metadata.
@Test
public void testEnableDictionaryForSortedColumn()
    throws Exception {
    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);

    for (int i = 0; i < RAW_SORTED_INDEX_COLUMNS.size(); i++) {
        SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
        SegmentDirectory segmentLocalFSDirectory =
            new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
        SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();

        String column = RAW_SORTED_INDEX_COLUMNS.get(i);
        indexLoadingConfig.removeNoDictionaryColumns(column);
        ForwardIndexHandler fwdIndexHandler =
            new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
        fwdIndexHandler.updateIndices(writer);
        fwdIndexHandler.postUpdateIndicesCleanup(writer);

        // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
        segmentLocalFSDirectory.close();

        ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
        testIndexExists(column, StandardIndexes.forward());
        testIndexExists(column, StandardIndexes.dictionary());
        validateIndexMap(column, true, false);
        validateForwardIndex(column, null, metadata.isSorted());

        // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change.
        int dictionaryElementSize = 0;
        FieldSpec.DataType dataType = metadata.getDataType();
        if (dataType == FieldSpec.DataType.STRING || dataType == FieldSpec.DataType.BYTES) {
            // This value is based on the rows in createTestData().
            dictionaryElementSize = 7;
        } else if (dataType == FieldSpec.DataType.BIG_DECIMAL) {
            dictionaryElementSize = 4;
        }
        validateMetadataProperties(column, true, dictionaryElementSize, metadata.getCardinality(),
            metadata.getTotalDocs(), dataType, metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
            metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
            metadata.getMinValue(), metadata.getMaxValue(), false);
    }
}
/**
 * Verifies that the number of events in the recent interval is still below the
 * configured threshold.
 *
 * @throws ThresholdExceedException when the recent event count has reached or exceeded
 *         the threshold
 */
public void checkAgainstThreshold() throws ThresholdExceedException {
    final int eventsInWindow = getEventCountsRecentInterval();
    if (eventsInWindow < maxEventsPerInterval) {
        return;
    }
    throw new ThresholdExceedException(
        String.format(
            "%d events detected in the recent interval, reaching the threshold %f.",
            eventsInWindow, maxEventsPerInterval));
}
// The meter must tolerate THRESHOLD_SMALL - 1 events within the window; the very next
// event pushes the count to the threshold and the check must throw.
@Test
void testCheckAgainstThreshold() {
    final ThresholdMeter thresholdMeter = createSmallThresholdMeter();
    // first THRESHOLD_SMALL - 1 events should not exceed threshold
    for (int i = 0; i < THRESHOLD_SMALL - 1; ++i) {
        thresholdMeter.markEvent();
        clock.advanceTime(SLEEP, TimeUnit.MILLISECONDS);
        thresholdMeter.checkAgainstThreshold();
    }

    // the THRESHOLD_SMALL-th event should exceed threshold
    thresholdMeter.markEvent();
    assertThatThrownBy(() -> thresholdMeter.checkAgainstThreshold())
            .isInstanceOf(ThresholdExceedException.class);
}
/**
 * Evaluates the consumer-path placeholders against the given path and stores each
 * resolved placeholder into the headers map.
 */
public static void evalPlaceholders(Map<String, Object> headersMap, String path, String consumerPath) {
    evalPlaceholders((key, value) -> headersMap.put(key, value), path, consumerPath);
}
// A request path longer than the consumer path must still resolve the placeholder
// without throwing: "{key}" binds to the segment "url".
@Test
@DisplayName("Test that the placeholders can eval if the given path is greater than the consumer path")
void testEvalPlaceholdersOutOfBound2() {
    Map<String, Object> headers = new HashMap<>();
    assertDoesNotThrow(() -> HttpHelper.evalPlaceholders(headers, "/some/url/value", "/some/{key}"),
        "The provided path is greater than the consumer path, so it should not throw an exception");
    assertNotEquals(0, headers.size());
    assertEquals("url", headers.get("key"));
}
/**
 * Puts the service into the quiesced state and returns a future that completes once no
 * timers are running. Idempotent: repeated calls return the same future.
 */
@Override
public CompletableFuture<Void> quiesce() {
    if (!quiesced) {
        // Flip the flag first so no new timers start after this point.
        quiesced = true;

        if (numRunningTimers.get() == 0) {
            // Nothing in flight: complete immediately. Otherwise the future is
            // presumably completed elsewhere when the last running timer finishes
            // (see numRunningTimers bookkeeping — not visible here).
            quiesceCompletedFuture.complete(null);
        }
    }

    return quiesceCompletedFuture;
}
// With no timers in flight, quiesce() must return an already-completed future.
@Test
void testQuiesceWhenNoRunningTimers() {
    ProcessingTimeServiceImpl processingTimeService =
        new ProcessingTimeServiceImpl(timerService, v -> v);
    assertThat(processingTimeService.quiesce()).isDone();
}
/**
 * Reads a 4-byte big-endian signed integer from the stream.
 *
 * <p>Fixed: the original expression {@code (byte) in.read() & 0xff} silently mapped the
 * end-of-stream marker (-1) to the byte value 0xff, so a truncated stream produced a
 * garbage integer instead of an error. Each byte is now checked for EOF.
 *
 * @param in source stream; exactly 4 bytes are consumed on success
 * @return the decoded big-endian int
 * @throws java.io.EOFException if the stream ends before 4 bytes are available
 * @throws IOException if the underlying read fails
 */
public int readInt(InputStream in) throws IOException {
    int result = 0;
    for (int shift = 24; shift >= 0; shift -= 8) {
        int b = in.read();
        if (b < 0) {
            throw new java.io.EOFException("Stream ended before 4 bytes of an int could be read");
        }
        result |= (b & 0xff) << shift;
    }
    return result;
}
// TODO(review): empty placeholder — this test exercises nothing. It should feed a known
// 4-byte stream to readInt() and assert the decoded value, including an EOF/truncation case.
@Test
public void readInt() throws Exception {
}
/**
 * Computes the final destination of a path given as a list of elements.
 *
 * <p>Non-magic paths are returned unchanged. For magic paths the magic segment is
 * elided: the destination is the parents of the magic directory plus either the
 * children under the {@code BASE} marker (when present) or just the filename.
 *
 * @param elements path split into elements
 * @return the destination path elements
 * @throws IllegalArgumentException if a magic/base prefix has no children beneath it
 */
public static List<String> finalDestination(List<String> elements) {
    if (isMagicPath(elements)) {
        List<String> destDir = magicPathParents(elements);
        List<String> children = magicPathChildren(elements);
        checkArgument(!children.isEmpty(), "No path found under the prefix " +
            MAGIC_PATH_PREFIX);
        ArrayList<String> dest = new ArrayList<>(destDir);
        if (containsBasePath(children)) {
            // there's a base marker in the path
            List<String> baseChildren = basePathChildren(children);
            checkArgument(!baseChildren.isEmpty(),
                "No path found under " + BASE);
            dest.addAll(baseChildren);
        } else {
            dest.add(filename(children));
        }
        return dest;
    } else {
        return elements;
    }
}
@Test
public void testFinalDestinationBaseDirectChild() {
    // A file directly under the BASE marker of a magic path must be relocated
    // to the destination directory root with both markers stripped.
    // The original test invoked the method but asserted nothing.
    assertEquals(l("3.txt"), finalDestination(l(MAGIC_PATH_PREFIX, BASE, "3.txt")));
}
/**
 * Parses an S3 URI of the form {@code s3://bucket/key} into its resource-id
 * components, normalising the key to always carry a leading slash.
 */
static S3ResourceId fromUri(String uri) {
    Matcher matcher = S3_URI.matcher(uri);
    checkArgument(matcher.matches(), "Invalid S3 URI: [%s]", uri);
    // An absent key becomes the empty string before normalisation.
    String key = Strings.nullToEmpty(matcher.group("KEY"));
    if (!key.startsWith("/")) {
        key = "/" + key;
    }
    return fromComponents(matcher.group("SCHEME"), matcher.group("BUCKET"), key);
}
@Test
public void testResourceIdTester() {
    // Run the standard ResourceId contract battery against an S3 resource id.
    S3Options options = PipelineOptionsFactory.create().as(S3Options.class);
    options.setAwsRegion(Region.US_WEST_1);
    FileSystems.setDefaultPipelineOptions(options);
    ResourceIdTester.runResourceIdBattery(S3ResourceId.fromUri("s3://bucket/foo/"));
}
/**
 * Executes a Telegram Bot API request synchronously by delegating to the
 * underlying API client and returns its typed response.
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
@Test
public void stopMessageLiveLocation() {
    // Stopping by chat/message id: a non-live message yields a 400 with a
    // well-known description; a successful stop is also acceptable.
    BaseResponse response = bot.execute(new StopMessageLiveLocation(chatId, 10009));
    if (!response.isOk()) {
        assertEquals(400, response.errorCode());
        assertEquals("Bad Request: message can't be edited", response.description());
    }
    // Stopping by inline message id: repeating the stop produces the
    // "not modified" error when the content is unchanged.
    response = bot.execute(new StopMessageLiveLocation("AgAAAPrwAQCj_Q4D2s-51_8jsuU"));
    if (!response.isOk()) {
        assertEquals(400, response.errorCode());
        assertEquals("Bad Request: message is not modified: specified new message content and reply markup are exactly the same as a current content and reply markup of the message", response.description());
    }
}
/**
 * Returns how many milliseconds remain until the next heartbeat is due,
 * first advancing the internal timers to the supplied timestamp.
 */
long timeToNextHeartbeat(long now) {
    update(now);
    return heartbeatTimer.remainingMs();
}
@Test
public void testTimeToNextHeartbeat() {
    heartbeat.sentHeartbeat(time.milliseconds());
    // Immediately after sending, a full interval remains.
    assertEquals(heartbeatIntervalMs, heartbeat.timeToNextHeartbeat(time.milliseconds()));
    time.sleep(heartbeatIntervalMs);
    // Exactly at the deadline the remaining time is zero.
    assertEquals(0, heartbeat.timeToNextHeartbeat(time.milliseconds()));
    time.sleep(heartbeatIntervalMs);
    // Past the deadline it stays clamped at zero rather than going negative.
    assertEquals(0, heartbeat.timeToNextHeartbeat(time.milliseconds()));
}
/**
 * {@inheritDoc}
 * Deleted rows are never detectable via the ResultSet, regardless of the
 * result set type, so this always reports {@code false}.
 */
@Override
public boolean deletesAreDetected(final int type) {
    return false;
}
@Test
void assertDeletesAreDetected() {
    // Delete detection is unsupported for every result set type.
    assertFalse(metaData.deletesAreDetected(0));
}
/**
 * Parses a FireFTP bookmarks file. Each line may contain one or more
 * {@code [...]} arrays, each of which holds {@code {...}} entries that are
 * delegated to the single-entry parser.
 *
 * @param protocols protocol registry used to resolve each bookmark entry
 * @param file      bookmarks file to read (UTF-8)
 * @throws AccessDeniedException if the file cannot be read
 */
protected void read(final ProtocolFactory protocols, final Local file) throws AccessDeniedException {
    // Compile the patterns once per invocation; the previous implementation
    // recompiled them for every line and every array match.
    final Pattern arrayPattern = Pattern.compile("\\[(.*?)\\]");
    final Pattern entryPattern = Pattern.compile("\\{(.*?)\\}");
    try (final BufferedReader in = new BufferedReader(new InputStreamReader(file.getInputStream(), StandardCharsets.UTF_8))) {
        String l;
        while((l = in.readLine()) != null) {
            Matcher array = arrayPattern.matcher(l);
            while(array.find()) {
                Matcher entries = entryPattern.matcher(array.group(1));
                while(entries.find()) {
                    final String entry = entries.group(1);
                    this.read(protocols, entry);
                }
            }
        }
    }
    catch(IOException e) {
        throw new AccessDeniedException(e.getMessage(), e);
    }
}
// Reading a bookmarks file that does not exist must surface as
// AccessDeniedException rather than a raw IOException.
@Test(expected = AccessDeniedException.class)
public void testParseNotFound() throws Exception {
    new FireFtpBookmarkCollection().read(new ProtocolFactory(Collections.emptySet()),
            new Local(System.getProperty("java.io.tmpdir"), "f"));
}
/**
 * Loads the given cloud objects into a Spark {@code Dataset<Row>}.
 * Applies (in order): optional source schema, datasource options parsed from
 * JSON config, path loading (comma-separated or array form), optional
 * path-derived partition columns, and a final coalesce/repartition.
 * Returns {@code Option.empty()} when there is nothing to load.
 */
public Option<Dataset<Row>> loadAsDataset(SparkSession spark, List<CloudObjectMetadata> cloudObjectMetadata,
                                          String fileFormat, Option<SchemaProvider> schemaProviderOption, int numPartitions) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Extracted distinct files " + cloudObjectMetadata.size()
            + " and some samples " + cloudObjectMetadata.stream().map(CloudObjectMetadata::getPath).limit(10).collect(Collectors.toList()));
    }
    if (isNullOrEmpty(cloudObjectMetadata)) {
        return Option.empty();
    }
    DataFrameReader reader = spark.read().format(fileFormat);
    String datasourceOpts = getStringWithAltKeys(properties, CloudSourceConfig.SPARK_DATASOURCE_OPTIONS, true);
    if (schemaProviderOption.isPresent()) {
        Schema sourceSchema = schemaProviderOption.get().getSourceSchema();
        if (sourceSchema != null && !sourceSchema.equals(InputBatch.NULL_SCHEMA)) {
            // Pin the reader to the provided Avro-derived schema instead of inferring.
            reader = reader.schema(AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema));
        }
    }
    if (StringUtils.isNullOrEmpty(datasourceOpts)) {
        // fall back to legacy config for backwards compatibility. TODO consolidate in HUDI-6020
        datasourceOpts = getStringWithAltKeys(properties, S3EventsHoodieIncrSourceConfig.SPARK_DATASOURCE_OPTIONS, true);
    }
    if (StringUtils.nonEmpty(datasourceOpts)) {
        // Options are configured as a JSON object string; parse into a map.
        final ObjectMapper mapper = new ObjectMapper();
        Map<String, String> sparkOptionsMap = null;
        try {
            sparkOptionsMap = mapper.readValue(datasourceOpts, Map.class);
        } catch (IOException e) {
            throw new HoodieException(String.format("Failed to parse sparkOptions: %s", datasourceOpts), e);
        }
        LOG.info(String.format("sparkOptions loaded: %s", sparkOptionsMap));
        reader = reader.options(sparkOptionsMap);
    }
    List<String> paths = new ArrayList<>();
    for (CloudObjectMetadata o : cloudObjectMetadata) {
        paths.add(o.getPath());
    }
    boolean isCommaSeparatedPathFormat = properties.getBoolean(SPARK_DATASOURCE_READER_COMMA_SEPARATED_PATH_FORMAT.key(), false);
    Dataset<Row> dataset;
    if (isCommaSeparatedPathFormat) {
        dataset = reader.load(String.join(",", paths));
    } else {
        dataset = reader.load(paths.toArray(new String[cloudObjectMetadata.size()]));
    }
    // add partition column from source path if configured
    if (containsConfigProperty(properties, PATH_BASED_PARTITION_FIELDS)) {
        String[] partitionKeysToAdd = getStringWithAltKeys(properties, PATH_BASED_PARTITION_FIELDS).split(",");
        // Add partition column for all path-based partition keys. If key is not present in path, the value will be null.
        for (String partitionKey : partitionKeysToAdd) {
            String partitionPathPattern = String.format("%s=", partitionKey);
            LOG.info(String.format("Adding column %s to dataset", partitionKey));
            // Extract the segment following "key=" up to the next path separator.
            dataset = dataset.withColumn(partitionKey, split(split(input_file_name(), partitionPathPattern).getItem(1), "/").getItem(0));
        }
    }
    dataset = coalesceOrRepartition(dataset, numPartitions);
    return Option.of(dataset);
}
@Test
public void partitionValueAddedToRow() {
    // A single object whose path encodes country=US/state=CA partitions.
    List<CloudObjectMetadata> input = Collections.singletonList(
        new CloudObjectMetadata("src/test/resources/data/partitioned/country=US/state=CA/data.json", 1));
    TypedProperties properties = new TypedProperties();
    // Request that both partition keys be extracted from the source path.
    properties.put("hoodie.streamer.source.cloud.data.partition.fields.from.path", "country,state");
    CloudObjectsSelectorCommon cloudObjectsSelectorCommon = new CloudObjectsSelectorCommon(properties);
    Option<Dataset<Row>> result = cloudObjectsSelectorCommon.loadAsDataset(sparkSession, input, "json", Option.empty(), 1);
    Assertions.assertTrue(result.isPresent());
    Assertions.assertEquals(1, result.get().count());
    // Row = file content plus the two path-derived partition columns.
    Row expected = RowFactory.create("some data", "US", "CA");
    Assertions.assertEquals(Collections.singletonList(expected), result.get().collectAsList());
}
/**
 * Indexes the given params by their key. When two params share a key the
 * last one encountered wins, matching plain {@code HashMap.put} semantics.
 */
public static Map<String, ActiveRuleParamDto> groupByKey(Collection<ActiveRuleParamDto> params) {
    Map<String, ActiveRuleParamDto> byKey = new HashMap<>();
    params.forEach(param -> byKey.put(param.getKey(), param));
    return byKey;
}
@Test
void groupByKey() {
    // Empty input yields an empty map.
    assertThat(ActiveRuleParamDto.groupByKey(Collections.emptyList())).isEmpty();
    Collection<ActiveRuleParamDto> dtos = Arrays.asList(
        new ActiveRuleParamDto().setKey("foo"),
        new ActiveRuleParamDto().setKey("bar"));
    // Each distinct key maps to its own param.
    Map<String, ActiveRuleParamDto> group = ActiveRuleParamDto.groupByKey(dtos);
    assertThat(group).containsOnlyKeys("foo", "bar");
}
/**
 * Builds a JSON Schema describing the expected GraphQL response for the
 * given query against the given SDL schema. The query is traversed pre-order
 * and each visited field contributes to the schema under the response's
 * {@code data} property.
 *
 * @param schemaText GraphQL SDL text
 * @param query      GraphQL query document text
 * @return a JSON Schema node for the response payload
 * @throws IOException if schema construction fails
 */
public static JsonNode buildResponseJsonSchema(String schemaText, String query) throws IOException {
    TypeDefinitionRegistry registry = new SchemaParser().parse(schemaText);
    // Mocked wiring is enough: we only need type information, not resolvers.
    GraphQLSchema schema = new SchemaGenerator().makeExecutableSchema(registry, RuntimeWiring.MOCKED_WIRING);
    Document graphqlRequest = new Parser().parseDocument(query);
    QueryTraverser queryTraversal = QueryTraverser.newQueryTraverser().schema(schema).document(graphqlRequest)
        .variables(new HashMap<>()).build();
    ObjectNode jsonSchema = initResponseJsonSchema();
    // The visitor fills in the schema rooted at properties.data.
    QueryVisitor visitor = new JsonSchemaBuilderQueryVisitor(
        (ObjectNode) jsonSchema.get("properties").get(GRAPHQL_RESPONSE_DATA));
    queryTraversal.visitPreOrder(visitor);
    return jsonSchema;
}
@Test
void testBuildResponseJsonSchema() {
    // Builds the response schema for a hero query and verifies the generated
    // structure: object node for hero, array for movies, required fields, and
    // additionalProperties flags at each level.
    String schemaText;
    String queryText = "{\n" + "  hero {\n" + "    name\n" + "    email\n" + "    family\n" + "    affiliate\n"
        + "    movies {\n" + "      title\n" + "    }\n" + "  }\n" + "}";
    JsonNode responseSchema = null;
    try {
        // Load schema from file.
        schemaText = FileUtils
            .readFileToString(new File("target/test-classes/io/github/microcks/util/graphql/basic-heroes.graphql"));
        // Build JsonSchema for response.
        responseSchema = GraphQLSchemaValidator.buildResponseJsonSchema(schemaText, queryText);
    } catch (Exception e) {
        fail("Exception should not be thrown");
    }
    // data.properties.hero must exist and be a fully described object schema.
    assertFalse(
        responseSchema.path("properties").path("data").path("properties").path("hero") instanceof MissingNode);
    ObjectNode heroNode = (ObjectNode) responseSchema.path("properties").path("data").path("properties").path("hero");
    assertEquals("object", heroNode.get("type").asText());
    assertEquals(JsonNodeType.OBJECT, heroNode.get("properties").getNodeType());
    assertEquals(JsonNodeType.ARRAY, heroNode.get("required").getNodeType());
    assertEquals(JsonNodeType.BOOLEAN, heroNode.get("additionalProperties").getNodeType());
    // All five selected hero fields are required.
    ArrayNode requiredHero = (ArrayNode) heroNode.get("required");
    assertEquals(5, requiredHero.size());
    Iterator<JsonNode> requiredHeroElements = requiredHero.elements();
    while (requiredHeroElements.hasNext()) {
        String requiredHeroField = requiredHeroElements.next().asText();
        assertTrue("name".equals(requiredHeroField) || "email".equals(requiredHeroField)
            || "family".equals(requiredHeroField) || "affiliate".equals(requiredHeroField)
            || "movies".equals(requiredHeroField));
    }
    // movies is an array whose items are objects requiring the single 'title' field.
    ObjectNode moviesNode = (ObjectNode) heroNode.path("properties").path("movies");
    assertEquals("array", moviesNode.get("type").asText());
    assertEquals(JsonNodeType.OBJECT, moviesNode.get("items").getNodeType());
    ObjectNode movieItemsNode = (ObjectNode) moviesNode.get("items");
    assertEquals("object", movieItemsNode.get("type").asText());
    assertEquals(JsonNodeType.OBJECT, movieItemsNode.get("properties").getNodeType());
    assertEquals(JsonNodeType.ARRAY, movieItemsNode.get("required").getNodeType());
    assertEquals(JsonNodeType.BOOLEAN, movieItemsNode.get("additionalProperties").getNodeType());
    ArrayNode requiredMovie = (ArrayNode) movieItemsNode.get("required");
    assertEquals(1, requiredMovie.size());
}
/**
 * Parses a BigQuery table spec string into a {@link TableReference}.
 * Accepted formats: {@code project:dataset.table}, {@code project.dataset.table},
 * and {@code dataset.table} (project left null in the last case).
 *
 * @throws IllegalArgumentException if the spec matches none of the formats
 */
@SuppressWarnings({
    "nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
public static TableReference parseTableSpec(String tableSpec) {
    Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
    if (!match.matches()) {
        throw new IllegalArgumentException(
            String.format(
                "Table specification [%s] is not in one of the expected formats ("
                    + " [project_id]:[dataset_id].[table_id],"
                    + " [project_id].[dataset_id].[table_id],"
                    + " [dataset_id].[table_id])",
                tableSpec));
    }
    TableReference ref = new TableReference();
    // PROJECT may be absent for the two-part form; group() then returns null.
    ref.setProjectId(match.group("PROJECT"));
    return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
@Test
public void testTableParsing_noProjectId() {
    // Two-part spec "dataset.table": the project id must remain unset.
    TableReference ref = BigQueryHelpers.parseTableSpec("data_set.table_name");
    assertEquals(null, ref.getProjectId());
    assertEquals("data_set", ref.getDatasetId());
    assertEquals("table_name", ref.getTableId());
}
/**
 * Returns the date format string matching the given regex. Convenience
 * overload delegating to the two-argument variant with a null second
 * argument (presumably a locale/default selector — confirm against the
 * two-arg overload's contract).
 */
public static String getDateFormatByRegex( String regex ) {
    return getDateFormatByRegex( regex, null );
}
@Test
public void testGetDateFormatByRegex() {
    // Unknown (null) regex maps to no format; a known regex maps to its format.
    assertNull( DateDetector.getDateFormatByRegex( null ) );
    assertEquals( SAMPLE_DATE_FORMAT, DateDetector.getDateFormatByRegex( SAMPLE_REGEXP ) );
}
/**
 * Invokes the bidirectional-stream handler. The sole argument is the
 * caller-supplied response observer; the bound function produces the matching
 * request-side observer, returned through an already-completed future.
 */
@Override
public CompletableFuture<StreamObserver<T>> invoke(Object[] arguments) {
    @SuppressWarnings("unchecked")
    StreamObserver<R> responses = (StreamObserver<R>) arguments[0];
    return CompletableFuture.completedFuture(func.apply(responses));
}
@Test
void invoke() throws ExecutionException, InterruptedException, TimeoutException {
    StreamObserver<String> responseObserver = Mockito.mock(StreamObserver.class);
    // The handler's function echoes back the mocked observer.
    BiStreamMethodHandler<String, String> handler = new BiStreamMethodHandler<>(o -> responseObserver);
    CompletableFuture<StreamObserver<String>> future = handler.invoke(new Object[] {responseObserver});
    // The future must already hold the observer produced by the function.
    Assertions.assertEquals(responseObserver, future.get(1, TimeUnit.SECONDS));
}
/**
 * Creates and persists a new access key for the given app, enforcing the
 * per-app key-count limit and recording an audit entry.
 *
 * @throws BadRequestException when the app already holds the maximum number of keys
 */
@Transactional
public AccessKey create(String appId, AccessKey entity) {
    long count = accessKeyRepository.countByAppId(appId);
    if (count >= ACCESSKEY_COUNT_LIMIT) {
        throw new BadRequestException("AccessKeys count limit exceeded");
    }
    // Force an insert (id 0) and bind the key to the target app.
    entity.setId(0L);
    entity.setAppId(appId);
    entity.setDataChangeLastModifiedBy(entity.getDataChangeCreatedBy());
    AccessKey accessKey = accessKeyRepository.save(entity);
    // Audit the insertion under the creator's identity.
    auditService.audit(AccessKey.class.getSimpleName(), accessKey.getId(), Audit.OP.INSERT,
        accessKey.getDataChangeCreatedBy());
    return accessKey;
}
@Test
public void testCreate() {
    String appId = "someAppId";
    String secret = "someSecret";
    AccessKey entity = assembleAccessKey(appId, secret);
    AccessKey accessKey = accessKeyService.create(appId, entity);
    // The service must persist the key and bind it to the owning app.
    // The original test only checked non-null, which would pass even if the
    // appId binding were broken.
    assertNotNull(accessKey);
    assertEquals(appId, accessKey.getAppId());
}
/**
 * Lists the components deployed on a given host within a cluster.
 */
@Operation(summary = "list", description = "List host-components")
@GetMapping("/hosts/{hostId}")
public ResponseEntity<List<HostComponentVO>> listByHost(@PathVariable Long clusterId, @PathVariable Long hostId) {
    return ResponseEntity.success(hostComponentService.listByHost(clusterId, hostId));
}
@Test
void listByHostReturnsHostComponentsForHost() {
    Long clusterId = 1L;
    Long hostId = 1L;
    // Stub the service with a fixed component list for this host.
    List<HostComponentVO> hostComponents = Arrays.asList(new HostComponentVO(), new HostComponentVO());
    when(hostComponentService.listByHost(clusterId, hostId)).thenReturn(hostComponents);
    ResponseEntity<List<HostComponentVO>> response = hostComponentController.listByHost(clusterId, hostId);
    // Controller wraps the service result in a successful envelope unchanged.
    assertTrue(response.isSuccess());
    assertEquals(hostComponents, response.getData());
}
/**
 * Evaluates this FEEL 'for' expression: iterates the configured iteration
 * contexts and collects each body result into the returned list. Returns
 * null when a range endpoint error was already reported on the AST.
 */
@Override
public Object evaluate(EvaluationContext ctx) {
    try {
        ctx.enterFrame();
        List<Object> toReturn = new ArrayList<>();
        // Expose the partially-built result so nested expressions can
        // reference the FEEL 'partial' variable.
        ctx.setValue("partial", toReturn);
        populateToReturn(0, ctx, toReturn);
        LOG.trace("returning {}", toReturn);
        return toReturn;
    } catch (EndpointOfRangeNotValidTypeException | EndpointOfRangeOfDifferentTypeException e) {
        // ast error already reported
        return null;
    } finally {
        ctx.exitFrame();
    }
}
@Test
void evaluateSimpleArray() {
    // for x in [1,2,3,4], y in x return y — iterating a scalar x yields x
    // itself, so the result flattens back to the original numbers.
    IterationContextNode x = getIterationContextNode("x", getListNode("[ 1, 2, 3, 4 ]", Arrays.asList("1", "2", "3", "4")), "x in [ 1, 2, 3, 4 ]");
    IterationContextNode y = getIterationContextNode("y", getNameRefNode(BuiltInType.UNKNOWN, "x"), "y in x");
    ForExpressionNode forExpressionNode = new ForExpressionNode(Arrays.asList(x, y),
        getNameRefNode(BuiltInType.UNKNOWN, "y"), "for x in [ 1, 2, 3, 4 ], y in x return y");
    Object retrieved = forExpressionNode.evaluate(CodegenTestUtil.newEmptyEvaluationContext());
    // FEEL numbers evaluate to BigDecimal.
    assertThat(retrieved).isInstanceOf(List.class).asList().
        containsExactly(BigDecimal.ONE, BigDecimal.valueOf(2), BigDecimal.valueOf(3), BigDecimal.valueOf(4));
}
/**
 * Removes the virtual network with the given numeric id.
 * Responds 204 No Content on success.
 */
@DELETE
@Path("{networkId}")
public Response removeVirtualNetwork(@PathParam("networkId") long networkId) {
    NetworkId nid = NetworkId.networkId(networkId);
    vnetAdminService.removeVirtualNetwork(nid);
    return Response.noContent().build();
}
@Test
public void testDeleteVirtualNetwork() {
    // Expect exactly one admin-service removal call for the DELETE request.
    mockVnetAdminService.removeVirtualNetwork(anyObject());
    expectLastCall();
    replay(mockVnetAdminService);
    WebTarget wt = target()
        .property(ClientProperties.SUPPRESS_HTTP_COMPLIANCE_VALIDATION, true);
    Response response = wt.path("vnets/" + "2")
        .request(MediaType.APPLICATION_JSON_TYPE)
        .delete();
    // Successful delete yields 204 No Content.
    assertThat(response.getStatus(), is(HttpURLConnection.HTTP_NO_CONTENT));
    verify(mockVnetAdminService);
}
/**
 * Serializes this MultiLabel as comma-separated {@code label=true} pairs,
 * optionally suffixed with {@code :score}.
 *
 * Note: because MultiLabel stores only the labels that are present, every
 * emitted pair has value {@code true}. E.g. if the dataset's possible labels
 * are {R1,R2} but this instance holds only {R1}, the output is "R1=true"
 * rather than "R1=true,R2=false". This matches the serializable form of
 * other multi-output types such as MultipleRegressor.
 */
@Override
public String getSerializableForm(boolean includeConfidence) {
    // %b of the constant true is always "true", so plain concatenation is
    // equivalent to the formatted form.
    String joined = labels.stream()
            .map(label -> label + "=true")
            .collect(Collectors.joining(","));
    return includeConfidence ? joined + ":" + score : joined;
}
@Test
public void getsCorrectSerializableForm() {
    // Unscored label set: confidence renders as NaN when requested.
    MultiLabel abc = new MultiLabel(mkLabelSet("a", "b", "c"));
    assertEquals("a=true,b=true,c=true", abc.getSerializableForm(false));
    assertEquals("a=true,b=true,c=true:NaN", abc.getSerializableForm(true));
    // Scored label set: the score is appended only when includeConfidence is true.
    MultiLabel scored = new MultiLabel(mkLabelSet("a", "b", "c"), 1.0);
    assertEquals("a=true,b=true,c=true", scored.getSerializableForm(false));
    assertEquals("a=true,b=true,c=true:1.0", scored.getSerializableForm(true));
}
/**
 * Creates the query optimizer selected by the QUERY_OPTIMIZER_TYPE property.
 * Only RULES maps to a real optimizer; any other recognised type yields the
 * no-op EmptyOptimizer. Unrecognised names are rejected.
 */
public static QueryOptimizer newOptimizer(HazelcastProperties properties) {
    HazelcastProperty property = ClusterProperty.QUERY_OPTIMIZER_TYPE;
    String configured = properties.getString(property);
    Type type;
    try {
        type = Type.valueOf(configured);
    } catch (IllegalArgumentException e) {
        throw onInvalidOptimizerType(configured);
    }
    return type == Type.RULES ? new RuleBasedQueryOptimizer() : new EmptyOptimizer();
}
// A value that is not a Type enum constant must be rejected.
@Test(expected = IllegalArgumentException.class)
public void newOptimizer_whenUnknownValue_thenThrowIllegalArgumentException() {
    HazelcastProperties hazelcastProperties = createMockHazelcastProperties(QUERY_OPTIMIZER_TYPE, "foo");
    QueryOptimizerFactory.newOptimizer(hazelcastProperties);
}
/**
 * Infers the output type information of a {@link MapFunction} given its input
 * type. Delegates to the four-argument overload with a null function name and
 * {@code false} for the final flag (its semantics are defined by that
 * overload — presumably lenient missing-type handling; confirm there).
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
        MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
    return getMapReturnTypes(mapInterface, inType, null, false);
}
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testCustomArrayWithTypeVariable() {
    // Extraction over an array of a generic custom type must resolve the
    // component as a tuple type with the bound type variable.
    RichMapFunction<CustomArrayObject2<Boolean>[], ?> function =
        new IdentityMapper<CustomArrayObject2<Boolean>[]>();
    TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(
        function, (TypeInformation) TypeInformation.of(new TypeHint<Tuple1<Boolean>[]>() {}));
    assertThat(ti).isInstanceOf(ObjectArrayTypeInfo.class);
    ObjectArrayTypeInfo<?, ?> oati = (ObjectArrayTypeInfo<?, ?>) ti;
    assertThat(oati.getComponentInfo().isTupleType()).isTrue();
    TupleTypeInfo<?> tti = (TupleTypeInfo<?>) oati.getComponentInfo();
    // The single tuple field carries the resolved Boolean type.
    assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO);
}
/**
 * Compiles a multipoint-to-singlepoint intent into a single LinkCollection
 * intent. Builds a tree of links by merging the shortest path from each
 * ingress towards the egress, allocates bandwidth along the tree if
 * constrained, and enforces the intent's partial-failure policy.
 *
 * @throws IntentException if no path exists at all, or if some paths are
 *         missing and partial failure is not allowed
 */
@Override
public List<Intent> compile(MultiPointToSinglePointIntent intent, List<Intent> installable) {
    // Tree of links keyed by the device at the link's destination side.
    Map<DeviceId, Link> links = new HashMap<>();
    ConnectPoint egressPoint = intent.egressPoint();
    final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
    boolean hasPaths = false;
    boolean missingSomePaths = false;
    for (ConnectPoint ingressPoint : intent.ingressPoints()) {
        if (ingressPoint.deviceId().equals(egressPoint.deviceId())) {
            // Ingress and egress on the same device: no links needed, but the
            // device must be up for the "path" to count.
            if (deviceService.isAvailable(ingressPoint.deviceId())) {
                hasPaths = true;
            } else {
                missingSomePaths = true;
            }
            continue;
        }
        Path path = getPath(intent, ingressPoint.deviceId(), egressPoint.deviceId());
        if (path != null) {
            hasPaths = true;
            for (Link link : path.links()) {
                if (links.containsKey(link.dst().deviceId())) {
                    // We've already reached the existing tree with the first
                    // part of this path. Add the merging point with different
                    // incoming port, but don't add the remainder of the path
                    // in case it differs from the path we already have.
                    links.put(link.src().deviceId(), link);
                    break;
                }
                links.put(link.src().deviceId(), link);
            }
        } else {
            missingSomePaths = true;
        }
    }
    // Allocate bandwidth on existing paths if a bandwidth constraint is set
    List<ConnectPoint> ingressCPs =
        intent.filteredIngressPoints().stream()
            .map(fcp -> fcp.connectPoint())
            .collect(Collectors.toList());
    ConnectPoint egressCP = intent.filteredEgressPoint().connectPoint();
    List<ConnectPoint> pathCPs =
        links.values().stream()
            .flatMap(l -> Stream.of(l.src(), l.dst()))
            .collect(Collectors.toList());
    pathCPs.addAll(ingressCPs);
    pathCPs.add(egressCP);
    allocateBandwidth(intent, pathCPs);
    if (!hasPaths) {
        throw new IntentException("Cannot find any path between ingress and egress points.");
    } else if (!allowMissingPaths && missingSomePaths) {
        throw new IntentException("Missing some paths between ingress and egress points.");
    }
    Intent result = LinkCollectionIntent.builder()
        .appId(intent.appId())
        .key(intent.key())
        .treatment(intent.treatment())
        .selector(intent.selector())
        .links(Sets.newHashSet(links.values()))
        .filteredIngressPoints(intent.filteredIngressPoints())
        .filteredEgressPoints(ImmutableSet.of(intent.filteredEgressPoint()))
        .priority(intent.priority())
        .constraints(intent.constraints())
        .resourceGroup(intent.resourceGroup())
        .build();
    return Collections.singletonList(result);
}
@Test
public void testPartialFailureConstraintFailure() {
    // Two ingresses but only one reachable path: with partial failure not
    // allowed, compilation must throw an IntentException.
    Set<FilteredConnectPoint> ingress = ImmutableSet.of(
        new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1)),
        new FilteredConnectPoint(new ConnectPoint(DID_5, PORT_1)));
    FilteredConnectPoint egress = new FilteredConnectPoint(new ConnectPoint(DID_4, PORT_2));
    MultiPointToSinglePointIntent intent = makeIntent(ingress, egress);
    String[] hops = {S3};
    MultiPointToSinglePointIntentCompiler compiler =
        makeCompiler(null, new IntentTestsMocks.FixedMP2MPMockPathService(hops), null);
    assertThat(compiler, is(notNullValue()));
    intentException.expect(IntentException.class);
    // compile() must throw, so no statement after this call is reachable.
    // The original test followed it with assertThat(result, null), which
    // passes a null Matcher and would NPE if ever executed; removed.
    compiler.compile(intent, null);
}
/**
 * Attaches feature-environment routing metadata to the router request.
 * Resolves the env label key (falling back to the default), then the env
 * label value (falling back to a sentinel for the base environment), and
 * configures metadata-router failover to "not key".
 */
@Override
public void apply(ProcessRoutersRequest request, PolarisRouterContext routerContext) {
    //1. get feature env router label key
    String envLabelKey = routerContext.getLabel(LABEL_KEY_FEATURE_ENV_ROUTER_KEY);
    if (StringUtils.isBlank(envLabelKey)) {
        envLabelKey = DEFAULT_FEATURE_ENV_ROUTER_LABEL;
    }
    //2. get feature env router label value
    String envLabelValue = routerContext.getLabel(envLabelKey);
    if (envLabelValue == null) {
        // router to base env when not matched feature env
        envLabelValue = NOT_EXISTED_ENV;
    }
    //3. set env metadata to router request
    Set<RouteArgument> routeArguments = new HashSet<>(1);
    routeArguments.add(RouteArgument.buildCustom(envLabelKey, envLabelValue));
    request.putRouterArgument(MetadataRouter.ROUTER_TYPE_METADATA, routeArguments);
    //4. set failover type to others
    request.setMetadataFailoverType(MetadataFailoverType.METADATAFAILOVERNOTKEY);
}
@Test
public void testNotExistedEnvLabel() {
    // The context declares a custom env label key but provides no value for
    // it, so the interceptor must fall back to the NOT_EXISTED_ENV sentinel.
    Map<String, String> labels = new HashMap<>();
    labels.put("system-feature-env-router-label", "specify-env");
    PolarisRouterContext routerContext = new PolarisRouterContext();
    routerContext.putLabels(RouterConstant.ROUTER_LABELS, labels);
    ProcessRoutersRequest request = new ProcessRoutersRequest();
    ServiceInstances serviceInstances = new DefaultServiceInstances(Mockito.mock(ServiceKey.class), new ArrayList<>());
    request.setDstInstances(serviceInstances);
    FeatureEnvRouterRequestInterceptor interceptor = new FeatureEnvRouterRequestInterceptor();
    interceptor.apply(request, routerContext);
    Map<String, String> metadataRouterLabels = request.getRouterMetadata().get(MetadataRouter.ROUTER_TYPE_METADATA);
    assertThat(metadataRouterLabels.size()).isEqualTo(1);
    assertThat(metadataRouterLabels.get("specify-env")).isEqualTo("NOT_EXISTED_ENV");
}
/**
 * Verifies that a PUK request is allowed for the given document: the most
 * recent PEN request for the bsn/docType/sequenceNo must exist and its PEN
 * must not be expired. Returns the shared OK status map on success.
 *
 * @throws PukRequestException when no prior PEN request exists or it is
 *         expired (raised by checkExpirationDatePen — confirm its codes)
 */
public Map<String, String> pukRequestAllowed(PukRequest request) throws PukRequestException {
    final PenRequestStatus result =
        repository.findFirstByBsnAndDocTypeAndSequenceNoOrderByRequestDatetimeDesc(
            request.getBsn(), request.getDocType(), request.getSequenceNo());
    checkExpirationDatePen(result);
    return statusOK;
}
@Test
public void pukRequestAllowedOnlyPossibleAfterPenRequest() throws PukRequestException {
    // No prior PEN request on record for this document.
    Mockito.when(mockRepository.findFirstByBsnAndDocTypeAndSequenceNoOrderByRequestDatetimeDesc(
        request.getBsn(), request.getDocType(), request.getSequenceNo())).thenReturn(null);
    Exception exception = assertThrows(PukRequestException.class, () -> {
        service.pukRequestAllowed(request);
    });
    // DWS6 is the error code for "PUK requested without a preceding PEN request".
    assertEquals("DWS6", exception.getMessage());
}
/**
 * Reads the current configuration snapshot for the requested keys.
 * Unknown keys are dropped, listeners are (re)registered for the rest, and
 * every cached entry — including absent ones, surfaced as null-valued
 * items — is copied into the returned table.
 */
@Override
public Optional<ConfigTable> readConfig(Set<String> keys) {
    removeUninterestedKeys(keys);
    registerKeyListeners(keys);
    final ConfigTable table = new ConfigTable();
    // Each cached value is an Optional; Optional.orElse(null) replaces the
    // manual isPresent()/get() branching with identical behavior.
    configItemKeyedByName.forEach(
        (key, value) -> table.add(new ConfigTable.ConfigItem(key, value.orElse(null))));
    return Optional.of(table);
}
@Test
public void shouldUpdateCachesWhenNotified() {
    // Wire fresh internal maps into the register so listener notifications
    // can be observed directly.
    cacheByKey = new ConcurrentHashMap<>();
    configItemKeyedByName = new ConcurrentHashMap<>();
    Whitebox.setInternalState(register, "cachesByKey", cacheByKey);
    Whitebox.setInternalState(register, "configItemKeyedByName", configItemKeyedByName);
    KVCache cache1 = mock(KVCache.class);
    KVCache cache2 = mock(KVCache.class);
    ArgumentCaptor<ConsulCache.Listener> listener1 = ArgumentCaptor.forClass(ConsulCache.Listener.class);
    ArgumentCaptor<ConsulCache.Listener> listener2 = ArgumentCaptor.forClass(ConsulCache.Listener.class);
    try (MockedStatic<KVCache> kvCacheMockedStatic = mockStatic(KVCache.class)) {
        // readConfig must create one KVCache per key and attach a listener.
        kvCacheMockedStatic.when(() -> KVCache.newCache(any(), eq("key1"))).thenReturn(cache1);
        kvCacheMockedStatic.when(() -> KVCache.newCache(any(), eq("key2"))).thenReturn(cache2);
        when(register.readConfig(any(Set.class))).thenCallRealMethod();
        register.readConfig(Sets.newHashSet("key1", "key2"));
        verify(cache1).addListener(listener1.capture());
        verify(cache2).addListener(listener2.capture());
        // Simulate Consul pushing values; payloads are base64-encoded.
        listener1.getValue()
            .notify(ImmutableMap.of("key1", ImmutableValue.builder()
                .createIndex(0)
                .modifyIndex(0)
                .lockIndex(0)
                .key("key1")
                .flags(0)
                .value(BaseEncoding.base64().encode("val1".getBytes()))
                .build()));
        listener2.getValue()
            .notify(ImmutableMap.of("key2", ImmutableValue.builder()
                .createIndex(0)
                .modifyIndex(0)
                .lockIndex(0)
                .key("key2")
                .flags(0)
                .value(BaseEncoding.base64().encode("val2".getBytes()))
                .build()));
        // The decoded values must land in the internal item map.
        assertEquals(2, configItemKeyedByName.size());
        assertEquals("val1", configItemKeyedByName.get("key1").get());
        assertEquals("val2", configItemKeyedByName.get("key2").get());
    }
}
/**
 * Returns the user id carried in the current HTTP request's header, the
 * empty string when no request is bound to this thread, or null when the
 * request exists but the header is absent.
 */
public static String getUserId() {
    ServletRequestAttributes attributes =
            (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
    if (attributes == null) {
        // No request bound to this thread (e.g. async/scheduled execution).
        return "";
    }
    return attributes.getRequest().getHeader(CommonConstants.USER_ID_HEADER);
}
@Test
public void testGetUserIdFromHeaderWhenMissing() {
    // Prepare the scenario where the header is missing
    when(request.getHeader(CommonConstants.USER_ID_HEADER)).thenReturn(null);
    // Call the method under test
    String userId = HeaderUtil.getUserId();
    // The mocked request returns no header value, so getUserId() propagates
    // null. (The original comment claimed an empty string is returned — that
    // fallback only applies when no request attributes are bound at all.)
    assertNull(userId);
}
/**
 * Scans the metric registry for per-task storage-usage gauges and returns
 * the largest reported value, or {@link BigInteger#ZERO} when no task
 * metrics are registered.
 */
public static synchronized BigInteger getMaxTaskUsage(final Metrics metricRegistry) {
    // Map matching entries straight to their values. The previous version
    // materialized an intermediate Map via Collectors.toMap (which throws on
    // duplicate keys) only to immediately discard the keys again.
    return metricRegistry
        .metrics()
        .entrySet()
        .stream()
        .filter(e -> e.getKey().name().contains(TASK_STORAGE_USED_BYTES))
        .map(e -> (BigInteger) e.getValue().metricValue())
        .reduce(BigInteger::max)
        .orElse(BigInteger.ZERO);
}
@Test public void shouldRecordMaxTaskUsageWithNoTasks() { // Given: when(metrics.metrics()).thenReturn(Collections.emptyMap()); // When: // Then: BigInteger maxVal = StorageUtilizationMetricsReporter.getMaxTaskUsage(metrics); assertEquals(maxVal, BigInteger.valueOf(0)); }
/**
 * Builds the SchemaKStream/SchemaKTable for a data source, dispatching on
 * the source type (stream vs. table) and on whether its key format is
 * windowed.
 *
 * @throws UnsupportedOperationException for unrecognised source types
 */
public static SchemaKStream<?> buildSource(
        final PlanBuildContext buildContext,
        final DataSource dataSource,
        final QueryContext.Stacker contextStacker
) {
    final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
    switch (dataSource.getDataSourceType()) {
        case KSTREAM:
            return windowed
                ? buildWindowedStream(
                    buildContext,
                    dataSource,
                    contextStacker
                ) : buildStream(
                    buildContext,
                    dataSource,
                    contextStacker
                );
        case KTABLE:
            return windowed
                ? buildWindowedTable(
                    buildContext,
                    dataSource,
                    contextStacker
                ) : buildTable(
                    buildContext,
                    dataSource,
                    contextStacker
                );
        default:
            throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
    }
}
@Test
public void shouldThrowOnV1TableSourceWithPseudoColumnVersionGreaterThanZero() {
    // Given: a V1 table source that illegally reports the current (V2-era)
    // pseudocolumn version — an inconsistent plan state.
    givenNonWindowedTable();
    givenExistingQueryWithOldPseudoColumnVersion(tableSourceV1);
    when(tableSourceV1.getPseudoColumnVersion()).thenReturn(CURRENT_PSEUDOCOLUMN_VERSION_NUMBER);
    // When:
    final Exception e = assertThrows(
        IllegalStateException.class,
        () -> SchemaKSourceFactory.buildSource(
            buildContext,
            dataSource,
            contextStacker
        )
    );
    // Then:
    assertThat(
        e.getMessage(),
        containsString("TableSourceV2 was released in conjunction with pseudocolumn version 1. Something has gone very wrong")
    );
}
/**
 * Returns a cached snapshot view of this executor's thread properties,
 * lazily creating it on first access. If the executor thread has not
 * started yet, a no-op task is submitted and awaited to force thread
 * creation. Publication is lock-free: a CAS ensures exactly one
 * DefaultThreadProperties instance wins and all callers see it.
 */
public final ThreadProperties threadProperties() {
    ThreadProperties threadProperties = this.threadProperties;
    if (threadProperties == null) {
        Thread thread = this.thread;
        if (thread == null) {
            // Must not be called from the event loop itself, or the
            // submit().sync() below would deadlock.
            assert !inEventLoop();
            submit(NOOP_TASK).syncUninterruptibly();
            thread = this.thread;
            assert thread != null;
        }
        threadProperties = new DefaultThreadProperties(thread);
        if (!PROPERTIES_UPDATER.compareAndSet(this, null, threadProperties)) {
            // Lost the race: use the instance published by the winner.
            threadProperties = this.threadProperties;
        }
    }
    return threadProperties;
}
@Test
public void testThreadProperties() {
    // Capture the executor's worker thread so its live attributes can be
    // compared against the ThreadProperties snapshot.
    final AtomicReference<Thread> threadRef = new AtomicReference<Thread>();
    SingleThreadEventExecutor executor = new SingleThreadEventExecutor(
            null, new DefaultThreadFactory("test"), false) {
        @Override
        protected void run() {
            threadRef.set(Thread.currentThread());
            while (!confirmShutdown()) {
                Runnable task = takeTask();
                if (task != null) {
                    task.run();
                }
            }
        }
    };
    // threadProperties() forces thread creation if needed.
    ThreadProperties threadProperties = executor.threadProperties();
    Thread thread = threadRef.get();
    assertEquals(thread.getId(), threadProperties.id());
    assertEquals(thread.getName(), threadProperties.name());
    assertEquals(thread.getPriority(), threadProperties.priority());
    assertEquals(thread.isAlive(), threadProperties.isAlive());
    assertEquals(thread.isDaemon(), threadProperties.isDaemon());
    assertTrue(threadProperties.stackTrace().length > 0);
    executor.shutdownGracefully();
}
/**
 * Enters the side-input read state on the execution state tracker.
 * Calls from any thread other than the tracker's tracked thread receive a
 * no-op closeable so state updates stay single-threaded and consistent.
 */
@Override
public Closeable enter() {
    // Only update status from tracked thread to avoid race condition and inconsistent state updates
    if (executionContext.getExecutionStateTracker().getTrackedThread() != Thread.currentThread()) {
        return () -> {};
    }
    updateCurrentStateIfOutdated();
    return executionContext.getExecutionStateTracker().enterState(currentExecutionState);
}
@Test
public void testEnterEntersStateIfCalledFromTrackedThread() {
  DataflowExecutionContext mockedExecutionContext = mock(DataflowExecutionContext.class);
  DataflowOperationContext mockedOperationContext = mock(DataflowOperationContext.class);
  final int siIndexId = 3;

  // Make the tracked thread the current thread, so enter() takes the
  // non-trivial path and actually enters a state.
  ExecutionStateTracker mockedExecutionStateTracker = mock(ExecutionStateTracker.class);
  when(mockedExecutionContext.getExecutionStateTracker()).thenReturn(mockedExecutionStateTracker);
  Thread mockedThreadObject = Thread.currentThread();
  when(mockedExecutionStateTracker.getTrackedThread()).thenReturn(mockedThreadObject);

  // Stub the current state and its step/declaring names used to look up the
  // counter state.
  DataflowExecutionState mockedExecutionState = mock(DataflowExecutionState.class);
  when(mockedExecutionStateTracker.getCurrentState()).thenReturn(mockedExecutionState);
  NameContext mockedNameContext = mock(NameContext.class);
  when(mockedExecutionState.getStepName()).thenReturn(mockedNameContext);
  when(mockedNameContext.originalName()).thenReturn("DummyName");
  NameContext mockedDeclaringNameContext = mock(NameContext.class);
  when(mockedOperationContext.nameContext()).thenReturn(mockedDeclaringNameContext);
  when(mockedDeclaringNameContext.originalName()).thenReturn("DummyDeclaringName");

  // Counter plumbing needed by the counter's constructor.
  CounterFactory mockedCounterFactory = mock(CounterFactory.class);
  when(mockedExecutionContext.getCounterFactory()).thenReturn(mockedCounterFactory);
  Counter<Long, Long> mockedCounter = mock(Counter.class);
  when(mockedCounterFactory.longSum(any())).thenReturn(mockedCounter);

  // The registry hands back the IO state that enter() is expected to enter.
  DataflowExecutionStateRegistry mockedExecutionStateRegistry =
      mock(DataflowExecutionStateRegistry.class);
  when(mockedExecutionContext.getExecutionStateRegistry())
      .thenReturn(mockedExecutionStateRegistry);
  DataflowExecutionState mockedCounterExecutionState = mock(DataflowExecutionState.class);
  when(mockedExecutionStateRegistry.getIOState(any(), any(), any(), any(), any(), any()))
      .thenReturn(mockedCounterExecutionState);

  DataflowSideInputReadCounter testObject =
      new DataflowSideInputReadCounter(mockedExecutionContext,
          mockedOperationContext, siIndexId);
  testObject.enter();

  // enter() on the tracked thread must enter the registry-provided IO state.
  verify(mockedExecutionStateTracker).enterState(mockedCounterExecutionState);
}
/** Returns the fixed name used to invoke this command from the CLI. */
@Override
public String getCommandName() {
    return COMMAND_NAME;
}
/**
 * Verifies the fallback ("better"/alternative) command path: when the primary
 * shell command fails, the alternative command runs, the overall exit code is
 * 0, and only the alternative command's output is written to the output file.
 */
@Test
public void backupCmdExecuted() throws IOException, AlluxioException, NoSuchFieldException,
    IllegalAccessException {
  CollectEnvCommand cmd = new CollectEnvCommand(FileSystemContext.create());

  // Write to temp dir
  File targetDir = InfoCollectorTestUtils.createTemporaryDirectory();
  CommandLine mockCommandLine = mock(CommandLine.class);
  String[] mockArgs = new String[]{cmd.getCommandName(), targetDir.getAbsolutePath()};
  when(mockCommandLine.getArgs()).thenReturn(mockArgs);
  when(mockCommandLine.getOptionValue("output-dir", ""))
      .thenReturn(targetDir.getAbsolutePath());

  // Replace commands to execute: the primary command always fails.
  Field f = cmd.getClass().getSuperclass().getDeclaredField("mCommands");
  f.setAccessible(true);
  ShellCommand mockCommandFail = mock(ShellCommand.class);
  when(mockCommandFail.runWithOutput()).thenReturn(
      new CommandReturn(255, "command failed"));
  Map<String, ShellCommand> mockCommandMap = new HashMap<>();
  mockCommandMap.put("mockCommand", mockCommandFail);
  f.set(cmd, mockCommandMap);

  // Replace better command to execute: the backup command succeeds.
  Field cb = cmd.getClass().getSuperclass().getDeclaredField("mCommandsAlt");
  cb.setAccessible(true);
  ShellCommand mockCommandBackup = mock(ShellCommand.class);
  when(mockCommandBackup.runWithOutput()).thenReturn(
      new CommandReturn(0, "backup command executed"));
  Map<String, ShellCommand> mockBetterMap = new HashMap<>();
  mockBetterMap.put("mockCommand", mockCommandBackup);
  cb.set(cmd, mockBetterMap);

  // The backup command worked so exit code is 0
  int ret = cmd.run(mockCommandLine);
  assertEquals(0, ret);

  // Verify the 1st option command failed, then backup executed
  verify(mockCommandFail).runWithOutput();
  verify(mockCommandBackup).runWithOutput();

  // Files will be copied to sub-dir of target dir.
  File subDir = new File(Paths.get(targetDir.getAbsolutePath(), cmd.getCommandName()).toString());
  // Assert contents explicitly rather than via the deprecated
  // assertEquals(Object[], Object[]) overload (deprecated in JUnit 4 in favor
  // of assertArrayEquals); this also avoids relying on overload resolution.
  String[] collected = subDir.list();
  assertEquals(1, collected.length);
  assertEquals("collectEnv.txt", collected[0]);

  // Verify only the better version command output is found
  String fileContent = new String(Files.readAllBytes(subDir.listFiles()[0].toPath()));
  assertTrue(fileContent.contains("backup command executed"));
}
/**
 * Returns the next (doubled) capacity for a power-of-two-sized structure,
 * never smaller than {@code MIN_CAPACITY}.
 *
 * @param current the current capacity; must be a positive power of two
 * @return the doubled capacity, at least {@code MIN_CAPACITY}
 * @throws RuntimeException if doubling would overflow {@code int}
 */
public static int nextCapacity(int current) {
    assert current > 0 && Long.bitCount(current) == 1 : "Capacity must be a power of two.";
    // Clamp so that doubling yields at least MIN_CAPACITY.
    int base = Math.max(current, MIN_CAPACITY / 2);
    int doubled = base << 1;
    // A left shift past Integer.MAX_VALUE flips the sign bit.
    if (doubled < 0) {
        throw new RuntimeException("Maximum capacity exceeded.");
    }
    return doubled;
}
@Test(expected = RuntimeException.class)
public void testNextCapacity_withLong_shouldThrowIfMaxCapacityReached() {
    // 2^62 is the largest power-of-two long (== Long.highestOneBit(Long.MAX_VALUE - 1));
    // doubling it overflows to a negative value, which nextCapacity must reject.
    long capacity = 1L << 62;
    nextCapacity(capacity);
}
@Override public void accept(T t) { updateTimeHighWaterMark(t.time()); shortTermStorage.add(t); drainDueToLatestInput(t); //standard drain policy drainDueToTimeHighWaterMark(); //prevent blow-up when data goes backwards in time sizeHighWaterMark = Math.max(sizeHighWaterMark, shortTermStorage.size()); }
@Test
public void testAllPointsWithinWindow() {
    /*
     * Confirm that no points are emitted when all the points occur within the time window
     */
    Duration maxLag = Duration.ofMinutes(5);
    ApproximateTimeSorter<TimePojo> sorter = new ApproximateTimeSorter<>(
        maxLag,
        (TimePojo t) -> {
            // The downstream consumer must never be invoked in this scenario;
            // any emission fails the test.
            throw new UnsupportedOperationException("No Point should be forward to this Consumer");
        }
    );
    for (TimePojo timeBox : testData()) {
        sorter.accept(timeBox);
    }
}
@VisibleForTesting public static String expandEnvironment(String var, Path containerLogDir) { var = var.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR, containerLogDir.toString()); var = var.replace(ApplicationConstants.CLASS_PATH_SEPARATOR, File.pathSeparator); if (Shell.isJavaVersionAtLeast(17)) { var = var.replace(ApplicationConstants.JVM_ADD_OPENS_VAR, ADDITIONAL_JDK17_PLUS_OPTIONS); } else { var = var.replace(ApplicationConstants.JVM_ADD_OPENS_VAR, ""); } // replace parameter expansion marker. e.g. {{VAR}} on Windows is replaced // as %VAR% and on Linux replaced as "$VAR" if (Shell.WINDOWS) { var = var.replaceAll("(\\{\\{)|(\\}\\})", "%"); } else { var = var.replace(ApplicationConstants.PARAMETER_EXPANSION_LEFT, "$"); var = var.replace(ApplicationConstants.PARAMETER_EXPANSION_RIGHT, ""); } return var; }
@Test(timeout = 10000)
public void testEnvExpansion() throws IOException {
  Path logPath = new Path("/nm/container/logs");
  // Two classpath entries joined by the cross-platform separator, a log-dir
  // expansion marker, and the JVM add-opens marker.
  String input =
      Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/*"
          + ApplicationConstants.CLASS_PATH_SEPARATOR
          + Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/lib/*"
          + ApplicationConstants.CLASS_PATH_SEPARATOR
          + Apps.crossPlatformify("HADOOP_LOG_HOME")
          + ApplicationConstants.LOG_DIR_EXPANSION_VAR
          + " " + ApplicationConstants.JVM_ADD_OPENS_VAR;

  String res = ContainerLaunch.expandEnvironment(input, logPath);

  // The add-opens marker expands to these options only on JDK 17+.
  String additionalJdk17PlusOptions =
      "--add-opens=java.base/java.lang=ALL-UNNAMED "
          + "--add-exports=java.base/sun.net.dns=ALL-UNNAMED "
          + "--add-exports=java.base/sun.net.util=ALL-UNNAMED";
  String expectedAddOpens =
      Shell.isJavaVersionAtLeast(17) ? additionalJdk17PlusOptions : "";

  if (Shell.WINDOWS) {
    // {{VAR}} markers become %VAR% and the separator becomes ';' on Windows.
    Assert.assertEquals("%HADOOP_HOME%/share/hadoop/common/*;"
        + "%HADOOP_HOME%/share/hadoop/common/lib/*;"
        + "%HADOOP_LOG_HOME%/nm/container/logs"
        + " " + expectedAddOpens, res);
  } else {
    // {{VAR}} markers become $VAR and the separator becomes ':' elsewhere.
    Assert.assertEquals("$HADOOP_HOME/share/hadoop/common/*:"
        + "$HADOOP_HOME/share/hadoop/common/lib/*:"
        + "$HADOOP_LOG_HOME/nm/container/logs"
        + " " + expectedAddOpens, res);
  }
  System.out.println(res);
}