focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Deduplicates the items of the file referenced by {@code from}, keeping only the LAST
 * occurrence of each key, and uploads the result to storage.
 *
 * <p>Two passes over the input: pass 1 records, per key, the offset (line number) of its
 * last occurrence; pass 2 re-reads the file and writes only lines whose offset matches
 * that recorded last occurrence.
 */
@Override
public Output run(RunContext runContext) throws Exception {
    // Resolve the (possibly templated) source URI.
    URI from = new URI(runContext.render(this.from));
    final PebbleFieldExtractor keyExtractor = getKeyExtractor(runContext);

    // key -> offset of the last line holding that key
    final Map<String, Long> index = new HashMap<>(); // can be replaced by small-footprint Map implementation

    // 1st iteration: build a map of key->offset
    try (final BufferedReader reader = newBufferedReader(runContext, from)) {
        long offset = 0L;
        String item;
        while ((item = reader.readLine()) != null) {
            String key = keyExtractor.apply(item);
            // put() overwrites earlier offsets, so only the last occurrence survives.
            index.put(key, offset);
            offset++;
        }
    }

    // metrics
    long processedItemsTotal = 0L;
    long droppedItemsTotal = 0L;
    long numKeys = index.size();

    final Path path = runContext.workingDir().createTempFile(".ion");

    // 2nd iteration: write deduplicate
    try (final BufferedWriter writer = Files.newBufferedWriter(path);
         final BufferedReader reader = newBufferedReader(runContext, from)) {
        long offset = 0L;
        String item;
        while ((item = reader.readLine()) != null) {
            String key = keyExtractor.apply(item);
            Long lastOffset = index.get(key);
            // Keep the line only if it is the last occurrence of its key.
            if (lastOffset != null && lastOffset == offset) {
                writer.write(item);
                writer.newLine();
            } else {
                droppedItemsTotal++;
            }
            offset++;
            processedItemsTotal++;
        }
    }
    URI uri = runContext.storage().putFile(path.toFile());
    index.clear();
    return Output
        .builder()
        .uri(uri)
        .numKeys(numKeys)
        .processedItemsTotal(processedItemsTotal)
        .droppedItemsTotal(droppedItemsTotal)
        .build();
}
// Verifies deduplication with a composite Pebble key expression ("{{ key }}-{{ v1 }}"):
// 7 input records collapse to 3 distinct keys; the LAST occurrence of each key is kept.
@Test
void shouldDeduplicateFileGivenKeyExpressionReturningArray() throws Exception {
    // Given
    RunContext runContext = runContextFactory.of();
    List<KeyValue2> values = List.of(
        new KeyValue2("k1", "k1", "v1"),
        new KeyValue2("k2", "k2", "v1"),
        new KeyValue2("k3", "k3", "v1"),
        new KeyValue2("k1", "k1", "v2"),
        new KeyValue2("k2", "k2", null),
        new KeyValue2("k3", "k3", "v2"),
        new KeyValue2("k1", "k1", "v3")
    );
    DeduplicateItems task = DeduplicateItems
        .builder()
        .from(generateKeyValueFile(values, runContext).toString())
        .expr(" {{ key }}-{{ v1 }}")
        .build();
    // When
    DeduplicateItems.Output output = task.run(runContext);
    // Then
    Assertions.assertNotNull(output);
    Assertions.assertNotNull(output.getUri());
    Assertions.assertEquals(3, output.getNumKeys());
    Assertions.assertEquals(4, output.getDroppedItemsTotal());
    Assertions.assertEquals(7, output.getProcessedItemsTotal());
    // The survivors are the last occurrence of each key, in original file order.
    List<KeyValue2> expected = List.of(
        new KeyValue2("k2", "k2", null),
        new KeyValue2("k3", "k3", "v2"),
        new KeyValue2("k1", "k1", "v3")
    );
    assertSimpleCompactedFile(runContext, output, expected, KeyValue2.class);
}
/**
 * Imports a version-1.3 configuration document from the given streaming JSON reader.
 *
 * <p>The top level must be a JSON object; each known member name is dispatched to its
 * dedicated reader, unknown members are offered to registered extensions and otherwise
 * skipped. After the whole document is consumed, deferred object references are fixed up
 * and the temporary id-mapping state is cleared.
 *
 * @param reader streaming JSON source positioned at the start of the document
 * @throws IOException on malformed input or read failure
 */
@Override
public void importData(JsonReader reader) throws IOException {
    logger.info("Reading configuration for 1.3");
    // this *HAS* to start as an object
    reader.beginObject();
    while (reader.hasNext()) {
        JsonToken tok = reader.peek();
        switch (tok) {
            case NAME:
                String name = reader.nextName();
                // find out which member it is
                if (name.equals(CLIENTS)) {
                    readClients(reader);
                } else if (name.equals(GRANTS)) {
                    readGrants(reader);
                } else if (name.equals(WHITELISTEDSITES)) {
                    readWhitelistedSites(reader);
                } else if (name.equals(BLACKLISTEDSITES)) {
                    readBlacklistedSites(reader);
                } else if (name.equals(AUTHENTICATIONHOLDERS)) {
                    readAuthenticationHolders(reader);
                } else if (name.equals(ACCESSTOKENS)) {
                    readAccessTokens(reader);
                } else if (name.equals(REFRESHTOKENS)) {
                    readRefreshTokens(reader);
                } else if (name.equals(SYSTEMSCOPES)) {
                    readSystemScopes(reader);
                } else {
                    // Unknown member: offer it to extensions supporting this version.
                    boolean processed = false;
                    for (MITREidDataServiceExtension extension : extensions) {
                        if (extension.supportsVersion(THIS_VERSION)) {
                            processed = extension.importExtensionData(name, reader);
                            if (processed) {
                                // if the extension processed data, break out of this inner loop
                                // (only the first extension to claim an extension point gets it)
                                break;
                            }
                        }
                    }
                    if (!processed) {
                        // unknown token, skip it
                        reader.skipValue();
                    }
                }
                break;
            case END_OBJECT:
                // the object ended, we're done here
                reader.endObject();
                continue;
            default:
                logger.debug("Found unexpected entry");
                reader.skipValue();
                continue;
        }
    }
    // Resolve forward references recorded during the per-member reads.
    fixObjectReferences();
    // Only the first extension supporting this version gets to fix its references.
    for (MITREidDataServiceExtension extension : extensions) {
        if (extension.supportsVersion(THIS_VERSION)) {
            extension.fixExtensionObjectReferences(maps);
            break;
        }
    }
    maps.clearAll();
}
// Verifies that importing a 1.3 config document containing two client entries causes both
// to be saved via the repository with all fields (id, secret, redirect URIs, scopes,
// grant types, introspection flag) faithfully deserialized.
@Test
public void testImportClients() throws IOException {
    // Expected client #1, mirrored by the JSON below.
    ClientDetailsEntity client1 = new ClientDetailsEntity();
    client1.setId(1L);
    client1.setAccessTokenValiditySeconds(3600);
    client1.setClientId("client1");
    client1.setClientSecret("clientsecret1");
    client1.setRedirectUris(ImmutableSet.of("http://foo.com/"));
    client1.setScope(ImmutableSet.of("foo", "bar", "baz", "dolphin"));
    client1.setGrantTypes(ImmutableSet.of("implicit", "authorization_code", "urn:ietf:params:oauth:grant_type:redelegate", "refresh_token"));
    client1.setAllowIntrospection(true);

    // Expected client #2.
    ClientDetailsEntity client2 = new ClientDetailsEntity();
    client2.setId(2L);
    client2.setAccessTokenValiditySeconds(3600);
    client2.setClientId("client2");
    client2.setClientSecret("clientsecret2");
    client2.setRedirectUris(ImmutableSet.of("http://bar.baz.com/"));
    client2.setScope(ImmutableSet.of("foo", "dolphin", "electric-wombat"));
    client2.setGrantTypes(ImmutableSet.of("client_credentials", "urn:ietf:params:oauth:grant_type:redelegate"));
    client2.setAllowIntrospection(false);

    // Full config document: all other collections empty, CLIENTS holds the two entries.
    String configJson = "{" +
        "\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
        "\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
        "\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
        "\"" + MITREidDataService.GRANTS + "\": [], " +
        "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
        "\"" + MITREidDataService.CLIENTS + "\": [" +
        "{\"id\":1,\"accessTokenValiditySeconds\":3600,\"clientId\":\"client1\",\"secret\":\"clientsecret1\"," +
        "\"redirectUris\":[\"http://foo.com/\"]," +
        "\"scope\":[\"foo\",\"bar\",\"baz\",\"dolphin\"]," +
        "\"grantTypes\":[\"implicit\",\"authorization_code\",\"urn:ietf:params:oauth:grant_type:redelegate\",\"refresh_token\"]," +
        "\"allowIntrospection\":true}," +
        "{\"id\":2,\"accessTokenValiditySeconds\":3600,\"clientId\":\"client2\",\"secret\":\"clientsecret2\"," +
        "\"redirectUris\":[\"http://bar.baz.com/\"]," +
        "\"scope\":[\"foo\",\"dolphin\",\"electric-wombat\"]," +
        "\"grantTypes\":[\"client_credentials\",\"urn:ietf:params:oauth:grant_type:redelegate\"]," +
        "\"allowIntrospection\":false}" +
        "  ]" +
        "}";

    logger.debug(configJson);

    JsonReader reader = new JsonReader(new StringReader(configJson));

    dataService.importData(reader);
    // Exactly one save per client entry; capture what was persisted.
    verify(clientRepository, times(2)).saveClient(capturedClients.capture());

    List<ClientDetailsEntity> savedClients = capturedClients.getAllValues();
    assertThat(savedClients.size(), is(2));

    // Field-by-field comparison against the expected entities.
    assertThat(savedClients.get(0).getAccessTokenValiditySeconds(), equalTo(client1.getAccessTokenValiditySeconds()));
    assertThat(savedClients.get(0).getClientId(), equalTo(client1.getClientId()));
    assertThat(savedClients.get(0).getClientSecret(), equalTo(client1.getClientSecret()));
    assertThat(savedClients.get(0).getRedirectUris(), equalTo(client1.getRedirectUris()));
    assertThat(savedClients.get(0).getScope(), equalTo(client1.getScope()));
    assertThat(savedClients.get(0).getGrantTypes(), equalTo(client1.getGrantTypes()));
    assertThat(savedClients.get(0).isAllowIntrospection(), equalTo(client1.isAllowIntrospection()));

    assertThat(savedClients.get(1).getAccessTokenValiditySeconds(), equalTo(client2.getAccessTokenValiditySeconds()));
    assertThat(savedClients.get(1).getClientId(), equalTo(client2.getClientId()));
    assertThat(savedClients.get(1).getClientSecret(), equalTo(client2.getClientSecret()));
    assertThat(savedClients.get(1).getRedirectUris(), equalTo(client2.getRedirectUris()));
    assertThat(savedClients.get(1).getScope(), equalTo(client2.getScope()));
    assertThat(savedClients.get(1).getGrantTypes(), equalTo(client2.getGrantTypes()));
    assertThat(savedClients.get(1).isAllowIntrospection(), equalTo(client2.isAllowIntrospection()));
}
/**
 * Converts a protobuf {@code NamenodeRoleProto} to the corresponding {@code NamenodeRole}.
 *
 * @param role protobuf role; a null argument results in a NullPointerException (switch on null)
 * @return the matching role, or null for an unrecognized enum value
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
    case NAMENODE:
        return NamenodeRole.NAMENODE;
    case BACKUP:
        return NamenodeRole.BACKUP;
    case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
    }
    // Unknown proto value: callers must handle null.
    return null;
}
// Round-trips an ExtendedBlock through its protobuf form and back, both for the
// default fixture block and after forcing a negative block id.
@Test
public void testConvertExtendedBlock() {
    ExtendedBlock b = getExtendedBlock();
    ExtendedBlockProto bProto = PBHelperClient.convert(b);
    ExtendedBlock b1 = PBHelperClient.convert(bProto);
    assertEquals(b, b1);

    // Negative ids must also survive the round trip.
    b.setBlockId(-1);
    bProto = PBHelperClient.convert(b);
    b1 = PBHelperClient.convert(bProto);
    assertEquals(b, b1);
}
public static long parseBytes(String text) throws IllegalArgumentException { Objects.requireNonNull(text, "text cannot be null"); final String trimmed = text.trim(); if (trimmed.isEmpty()) { throw new IllegalArgumentException("argument is an empty- or whitespace-only string"); } final int len = trimmed.length(); int pos = 0; char current; while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') { pos++; } final String number = trimmed.substring(0, pos); final String unit = trimmed.substring(pos).trim().toLowerCase(Locale.US); if (number.isEmpty()) { throw new NumberFormatException("text does not start with a number"); } final long value; try { value = Long.parseLong(number); // this throws a NumberFormatException on overflow } catch (NumberFormatException e) { throw new IllegalArgumentException( "The value '" + number + "' cannot be re represented as 64bit number (numeric overflow)."); } final long multiplier = parseUnit(unit).map(MemoryUnit::getMultiplier).orElse(1L); final long result = value * multiplier; // check for overflow if (result / multiplier != value) { throw new IllegalArgumentException( "The value '" + text + "' cannot be re represented as 64bit number of bytes (numeric overflow)."); } return result; }
// Surrounding whitespace must be ignored, both around the whole input and
// between the number and the unit.
@Test
void testTrimBeforeParse() {
    assertThat(MemorySize.parseBytes("      155      ")).isEqualTo(155L);
    assertThat(MemorySize.parseBytes("      155 bytes   ")).isEqualTo(155L);
}
/**
 * Creates a {@link JobConfigurer} for the given job; thin factory delegating to
 * {@code JobConfigurer.create}.
 *
 * @param job the job to configure multiple outputs for
 * @return a configurer bound to the job
 */
public static JobConfigurer createConfigurer(Job job) {
    return JobConfigurer.create(job);
}
// Map-only job writing to two outputs (text + sequence file) via MultiOutputFormat.
// Also verifies that per-output DistributedCache additions are merged (deduplicated)
// back into the main job configuration, then checks both outputs' contents.
@Test
public void testMultiOutputFormatWithoutReduce() throws Throwable {
    Job job = new Job(mrConf, "MultiOutNoReduce");
    job.setMapperClass(MultiOutWordIndexMapper.class);
    job.setJarByClass(this.getClass());
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(MultiOutputFormat.class);
    job.setNumReduceTasks(0);

    JobConfigurer configurer = MultiOutputFormat.createConfigurer(job);
    configurer.addOutputFormat("out1", TextOutputFormat.class, IntWritable.class, Text.class);
    configurer.addOutputFormat("out2", SequenceFileOutputFormat.class, Text.class, IntWritable.class);

    Path outDir = new Path(workDir.getPath(), job.getJobName());
    FileOutputFormat.setOutputPath(configurer.getJob("out1"), new Path(outDir, "out1"));
    FileOutputFormat.setOutputPath(configurer.getJob("out2"), new Path(outDir, "out2"));

    String fileContent = "Hello World";
    String inputFile = createInputFile(fileContent);
    FileInputFormat.setInputPaths(job, new Path(inputFile));

    //Test for merging of configs
    DistributedCache.addFileToClassPath(new Path(inputFile), job.getConfiguration(), fs);
    String dummyFile = createInputFile("dummy file");
    DistributedCache.addFileToClassPath(new Path(dummyFile), configurer.getJob("out1")
        .getConfiguration(), fs);
    // duplicate of the value. Merging should remove duplicates
    DistributedCache.addFileToClassPath(new Path(inputFile), configurer.getJob("out2")
        .getConfiguration(), fs);

    configurer.configure();

    // Verify if the configs are merged
    Path[] fileClassPaths = DistributedCache.getFileClassPaths(job.getConfiguration());
    List<Path> fileClassPathsList = Arrays.asList(fileClassPaths);
    Assert.assertTrue("Cannot find " + (new Path(inputFile)) + " in " + fileClassPathsList,
        fileClassPathsList.contains(new Path(inputFile)));
    Assert.assertTrue("Cannot find " + (new Path(dummyFile)) + " in " + fileClassPathsList,
        fileClassPathsList.contains(new Path(dummyFile)));

    URI[] cacheFiles = DistributedCache.getCacheFiles(job.getConfiguration());
    List<URI> cacheFilesList = Arrays.asList(cacheFiles);
    URI inputFileURI = new Path(inputFile).makeQualified(fs).toUri();
    Assert.assertTrue("Cannot find " + inputFileURI + " in " + cacheFilesList,
        cacheFilesList.contains(inputFileURI));
    URI dummyFileURI = new Path(dummyFile).makeQualified(fs).toUri();
    Assert.assertTrue("Cannot find " + dummyFileURI + " in " + cacheFilesList,
        cacheFilesList.contains(dummyFileURI));

    Assert.assertTrue(job.waitForCompletion(true));

    Path textOutPath = new Path(outDir, "out1/part-m-00000");
    String[] textOutput = readFully(textOutPath).split("\n");
    Path seqOutPath = new Path(outDir, "out2/part-m-00000");
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, seqOutPath, mrConf);
    Text key = new Text();
    IntWritable value = new IntWritable();
    String[] words = fileContent.split(" ");
    Assert.assertEquals(words.length, textOutput.length);
    LOG.info("Verifying file contents");
    // Each word appears once in both outputs: "index\tword" in text,
    // (word -> index) pairs in the sequence file.
    for (int i = 0; i < words.length; i++) {
        Assert.assertEquals((i + 1) + "\t" + words[i], textOutput[i]);
        reader.next(key, value);
        Assert.assertEquals(words[i], key.toString());
        Assert.assertEquals((i + 1), value.get());
    }
    Assert.assertFalse(reader.next(key, value));
}
/**
 * Refreshes proxy-user configuration from the default server-side configuration
 * (convenience overload constructing a fresh {@code Configuration}).
 */
public static void refreshSuperUserGroupsConfiguration() {
    //load server side configuration;
    refreshSuperUserGroupsConfiguration(new Configuration());
}
// An empty config-key prefix is invalid and must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testProxyUsersWithEmptyPrefix() throws Exception {
    ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false), "");
}
/**
 * Parses a mode string, accepting either numeric (octal) or symbolic (e.g. "u+rwx") form.
 *
 * <p>Numeric parsing is attempted first; a {@code NumberFormatException} falls through
 * to symbolic parsing.
 *
 * @param value the mode string; must not be blank
 * @return the parsed mode
 * @throws IllegalArgumentException if the value is null, empty, or whitespace-only
 */
public static Mode parse(String value) {
    if (StringUtils.isBlank(value)) {
        throw new IllegalArgumentException(ExceptionMessage.INVALID_MODE.getMessage(value));
    }
    try {
        return parseNumeric(value);
    } catch (NumberFormatException e) {
        // Treat as symbolic
        return parseSymbolic(value);
    }
}
// A null mode string must be rejected with the INVALID_MODE message.
@Test
public void symbolicsBadNull() {
    mThrown.expect(IllegalArgumentException.class);
    mThrown.expectMessage(ExceptionMessage.INVALID_MODE.getMessage((Object) null));
    ModeParser.parse(null);
}
/**
 * Computes the set of source fields the record extractor must read, based on the
 * ingestion config and schema.
 *
 * @param ingestionConfig optional ingestion config; may be null
 * @param schema the table schema
 * @return the fields to extract; an EMPTY set means "extract all fields"
 */
public static Set<String> getFieldsForRecordExtractor(@Nullable IngestionConfig ingestionConfig, Schema schema) {
    Set<String> fieldsForRecordExtractor = new HashSet<>();

    if (null != ingestionConfig && (null != ingestionConfig.getSchemaConformingTransformerConfig()
        || null != ingestionConfig.getSchemaConformingTransformerV2Config())) {
        // The SchemaConformingTransformer requires that all fields are extracted, indicated by returning an empty set
        // here. Compared to extracting the fields specified below, extracting all fields should be a superset.
        return fieldsForRecordExtractor;
    }

    // Collect fields referenced by the ingestion config, then by the schema, then
    // adjust the set for complex-type (unnesting) handling.
    extractFieldsFromIngestionConfig(ingestionConfig, fieldsForRecordExtractor);
    extractFieldsFromSchema(schema, fieldsForRecordExtractor);
    fieldsForRecordExtractor = getFieldsToReadWithComplexType(fieldsForRecordExtractor, ingestionConfig);
    return fieldsForRecordExtractor;
}
// With a complex-type config using "__" as the unnesting delimiter, schema columns that
// contain the delimiter are mapped back to their root field name (e.g. "a_b__c_d" -> "a_b"),
// while plain columns pass through unchanged.
@Test
public void testComplexTypeConfig() {
    IngestionConfig ingestionConfig = new IngestionConfig();
    ComplexTypeConfig complexTypeConfig =
        new ComplexTypeConfig(null, "__", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE, null);
    Schema schema = new Schema();
    ingestionConfig.setComplexTypeConfig(complexTypeConfig);
    schema.addField(new DimensionFieldSpec("a_b__c_d", FieldSpec.DataType.STRING, true));
    schema.addField(new DimensionFieldSpec("f_d", FieldSpec.DataType.STRING, false));
    schema.addField(new DimensionFieldSpec("ab__cd", FieldSpec.DataType.STRING, true));
    Set<String> fields = IngestionUtils.getFieldsForRecordExtractor(ingestionConfig, schema);
    Assert.assertEquals(fields.size(), 3);
    Assert.assertTrue(fields.containsAll(Sets.newHashSet("a_b", "f_d", "ab")));
}
/**
 * Sends the MySQL handshake packet to the client and allocates a connection id.
 *
 * <p>If proxy-side SSL is enabled, an SSL request handler is prepended to the channel
 * pipeline before the handshake is flushed. The new connection id is also registered
 * with the statement-id generator.
 *
 * @param context the Netty channel handler context for this connection
 * @return the allocated connection id
 */
@Override
public int handshake(final ChannelHandlerContext context) {
    int result = ConnectionIdGenerator.getInstance().nextId();
    // Next phase: fast-path authentication.
    connectionPhase = MySQLConnectionPhase.AUTH_PHASE_FAST_PATH;
    boolean sslEnabled = ProxySSLContext.getInstance().isSSLEnabled();
    if (sslEnabled) {
        context.pipeline().addFirst(MySQLSSLRequestHandler.class.getSimpleName(), new MySQLSSLRequestHandler());
    }
    context.writeAndFlush(new MySQLHandshakePacket(result, sslEnabled, authPluginData));
    MySQLStatementIdGenerator.getInstance().registerConnection(result);
    return result;
}
// Handshake without SSL must still allocate a positive connection id and flush
// a handshake packet to the client.
@Test
void assertHandshakeWithSSLNotEnabled() {
    ChannelHandlerContext context = mockChannelHandlerContext();
    assertTrue(authenticationEngine.handshake(context) > 0);
    verify(context).writeAndFlush(any(MySQLHandshakePacket.class));
}
/**
 * Builds the re-key (SELECT KEY) step: evaluates the key expression against each row
 * and re-keys the stream on the result.
 *
 * <p>Rows whose value is null, or whose key expression evaluates to null, are filtered
 * out before re-keying. Note the expression is evaluated twice per surviving row
 * (once in the filter, once in selectKey); evaluation errors are reported through the
 * processing logger.
 *
 * @param stream the upstream holder whose key type is discarded
 * @param selectKey the plan step carrying the key expression and properties
 * @param buildContext runtime services (config, function registry, logging)
 * @return a holder for the re-keyed stream with the resolved result schema
 */
public static KStreamHolder<GenericKey> build(
    final KStreamHolder<?> stream,
    final StreamSelectKeyV1 selectKey,
    final RuntimeBuildContext buildContext
) {
    final LogicalSchema sourceSchema = stream.getSchema();

    // Compile the key expression against the source schema.
    final CompiledExpression expression = buildExpressionEvaluator(
        selectKey,
        buildContext,
        sourceSchema
    );
    final ProcessingLogger processingLogger = buildContext
        .getProcessingLogger(selectKey.getProperties().getQueryContext());

    final String errorMsg = "Error extracting new key using expression "
        + selectKey.getKeyExpression();

    final Function<GenericRow, Object> evaluator = val -> expression
        .evaluate(val, null, processingLogger, () -> errorMsg);

    final LogicalSchema resultSchema =
        new StepSchemaResolver(buildContext.getKsqlConfig(), buildContext.getFunctionRegistry()).resolve(selectKey, sourceSchema);

    final KStream<?, GenericRow> kstream = stream.getStream();
    // Drop rows that cannot produce a key, then re-key on the evaluated expression.
    final KStream<GenericKey, GenericRow> rekeyed = kstream
        .filter((key, val) -> val != null && evaluator.apply(val) != null)
        .selectKey((key, val) -> GenericKey.genericKey(evaluator.apply(val)));

    return new KStreamHolder<>(
        rekeyed,
        resultSchema,
        ExecutionKeyFactory.unwindowed(buildContext)
    );
}
// A row with nulls in non-key columns must still pass the re-key filter,
// as long as the key expression itself evaluates to a non-null value.
@Test
public void shouldIgnoreNullNonKeyColumns() {
    // When:
    selectKey.build(planBuilder, planInfo);

    // Then:
    verify(kstream).filter(predicateCaptor.capture());
    final Predicate<GenericKey, GenericRow> predicate = getPredicate();
    assertThat(predicate.test(SOURCE_KEY, value(null, A_BOI, 0, "dre")), is(true));
}
/**
 * Renames a category owned by the given member.
 *
 * <p>Validation order matters: the duplicate-name check runs against the member's
 * categories first, then the target category is loaded and ownership is verified
 * before the rename is applied (flushed by the transaction).
 *
 * @param memberDto the acting member
 * @param id the id of the category to rename
 * @param updateCategoryRequest carries the new category name
 */
@Transactional
public void update(MemberDto memberDto, Long id, UpdateCategoryRequest updateCategoryRequest) {
    Member member = memberRepository.fetchById(memberDto.id());
    validateDuplicatedCategory(updateCategoryRequest.name(), member);
    Category category = categoryRepository.fetchById(id);
    validateAuthorizeMember(category, member);
    category.updateName(updateCategoryRequest.name());
}
// Happy path: the owner renames their own category and the new name is persisted.
@Test
@DisplayName("카테고리 수정 성공")
void updateCategorySuccess() {
    String updateCategoryName = "updateName";
    Member member = memberRepository.save(MemberFixture.memberFixture());
    Category savedCategory = categoryRepository.save(new Category("category1", member));

    categoryService.update(MemberDto.from(member), savedCategory.getId(), new UpdateCategoryRequest(updateCategoryName));

    // Re-fetch to confirm the rename was persisted.
    assertThat(categoryRepository.fetchById(savedCategory.getId()).getName()).isEqualTo(updateCategoryName);
}
/**
 * Iterates over the iterable applying the indexed procedure in parallel, using the
 * default fork-join pool (convenience overload).
 *
 * @param iterable the elements to process
 * @param procedure receives each element together with its index
 */
public static <T> void forEachWithIndex(Iterable<T> iterable, ObjectIntProcedure<? super T> procedure) {
    FJIterate.forEachWithIndex(iterable, procedure, FJIterate.FORK_JOIN_POOL);
}
// Parallel indexed iteration over a FastList must write every element to its own
// index slot, fully populating the target array.
@Test
public void testForEachWithIndexToArrayUsingFastList() {
    Integer[] array = new Integer[200];
    MutableList<Integer> list = new FastList<>(Interval.oneTo(200));
    // Sanity check: target array starts out all-null.
    assertTrue(ArrayIterate.allSatisfy(array, Predicates.isNull()));
    // Batch size 10, minimum fork size 10 — forces parallel batches.
    FJIterate.forEachWithIndex(list, (each, index) -> array[index] = each, 10, 10);
    assertArrayEquals(array, list.toArray(new Integer[]{}));
}
/**
 * Determines, from dictionary pages alone, whether the given predicate proves that no
 * row in the row group can match — in which case the whole block may be dropped.
 *
 * @param pred the filter predicate to evaluate; must not be null
 * @param columns the column chunk metadata of the row group; must not be null
 * @param dictionaries access to the dictionary pages for the columns
 * @return true if the row group cannot contain any matching rows
 */
public static boolean canDrop(
    FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
  // Fixed message typos: "cannnot" -> "cannot".
  Objects.requireNonNull(pred, "pred cannot be null");
  Objects.requireNonNull(columns, "columns cannot be null");
  return pred.accept(new DictionaryFilter(columns, dictionaries));
}
@Test
public void testInInt96() throws Exception {
    // INT96 ordering is undefined => no filtering shall be done
    BinaryColumn b = binaryColumn("int96_field");

    // Set of 12-byte binaries: neither in nor notIn may drop the block.
    Set<Binary> set1 = new HashSet<>();
    set1.add(toBinary("-2", 12));
    set1.add(toBinary("-0", 12));
    set1.add(toBinary("12345", 12));
    FilterPredicate predIn1 = in(b, set1);
    FilterPredicate predNotIn1 = notIn(b, set1);
    assertFalse("Should not drop block for in (-2, -0, 12345)", canDrop(predIn1, ccmd, dictionaries));
    assertFalse("Should not drop block for notIn (-2, -0, 12345)", canDrop(predNotIn1, ccmd, dictionaries));

    // 17-byte binaries (longer than INT96): still no dropping.
    Set<Binary> set2 = new HashSet<>();
    set2.add(toBinary("-2", 17));
    set2.add(toBinary("12345", 17));
    set2.add(toBinary("-789", 17));
    FilterPredicate predIn2 = in(b, set2);
    FilterPredicate predNotIn2 = notIn(b, set2);
    assertFalse("Should not drop block for in (-2, 12345, -789)", canDrop(predIn2, ccmd, dictionaries));
    assertFalse("Should not drop block for notIn (-2, 12345, -789)", canDrop(predNotIn2, ccmd, dictionaries));

    // A set containing only null: no dropping either.
    Set<Binary> set3 = new HashSet<>();
    set3.add(null);
    FilterPredicate predIn3 = in(b, set3);
    FilterPredicate predNotIn3 = notIn(b, set3);
    assertFalse("Should not drop block for null", canDrop(predIn3, ccmd, dictionaries));
    assertFalse("Should not drop block for null", canDrop(predNotIn3, ccmd, dictionaries));
}
/**
 * Creates a REST setting builder matching HTTP HEAD requests for all resource ids.
 *
 * @return a builder scoped to the HEAD method
 */
public static RestSettingBuilder head() {
    return all(HttpMethod.HEAD);
}
// A HEAD request against a configured resource id must return 200 (response body
// configured with an ETag header).
@Test
public void should_head() throws Exception {
    server.resource("targets",
        head("1").response(header("ETag", "Moco"))
    );

    running(server, () -> {
        HttpResponse httpResponse = helper.headForResponse(remoteUrl("/targets/1"));
        assertThat(httpResponse.getCode(), is(200));
    });
}
/**
 * Validates a reservation delete request and resolves the plan it belongs to;
 * delegates to the shared validation routine with the delete audit constant.
 *
 * @param reservationSystem the reservation system to resolve the plan from
 * @param request the delete request carrying the reservation id
 * @return the plan associated with the reservation
 * @throws YarnException if the reservation id is unknown or invalid
 */
public Plan validateReservationDeleteRequest(
    ReservationSystem reservationSystem, ReservationDeleteRequest request)
    throws YarnException {
    return validateReservation(reservationSystem, request.getReservationId(),
        AuditConstants.DELETE_RESERVATION_REQUEST);
}
// Happy path: deleting a known reservation validates successfully and resolves a plan.
@Test
public void testDeleteReservationNormal() {
    ReservationDeleteRequest request = new ReservationDeleteRequestPBImpl();
    ReservationId reservationID = ReservationSystemTestUtil.getNewReservationId();
    request.setReservationId(reservationID);
    ReservationAllocation reservation = mock(ReservationAllocation.class);
    // 'plan' here is the mocked fixture field; teach it about the reservation.
    when(plan.getReservationById(reservationID)).thenReturn(reservation);
    // Renamed the local from 'plan' to 'result' — the original shadowed the mocked
    // fixture field referenced two lines above, which was confusing to read.
    Plan result = null;
    try {
        result = rrValidator.validateReservationDeleteRequest(rSystem, request);
    } catch (YarnException e) {
        Assert.fail(e.getMessage());
    }
    Assert.assertNotNull(result);
}
/**
 * Attempts to emit the item to the given outbox ordinal.
 *
 * @param ordinal the outbox ordinal to emit to
 * @param item the item to emit; must not be null
 * @return true if the outbox accepted the item, false if it is currently full
 */
@CheckReturnValue
protected final boolean tryEmit(int ordinal, @Nonnull Object item) {
    return outbox.offer(ordinal, item);
}
// Emitting to a multi-ordinal target must deliver the item to every listed ordinal.
@Test
public void when_tryEmitTo1And2_then_emittedTo1And2() {
    // When
    boolean emitted = p.tryEmit(ORDINALS_1_2, MOCK_ITEM);

    // Then
    assertTrue(emitted);
    validateReceptionAtOrdinals(MOCK_ITEM, ORDINALS_1_2);
}
/**
 * Opens an upload stream for the given file.
 *
 * <p>If the transfer status carries no upload URL, a new multipart upload is started and
 * its part URL used; otherwise the existing URL from the status is reused. Data written
 * to the returned stream is sent with an HTTP PUT; on 200 the response ETag (when present
 * and the file is not client-side encrypted) is verified against the MD5 checksum in the
 * status. For self-started uploads the returned stream additionally finalizes the upload
 * (completeUpload) on close, guarded against double-close.
 *
 * @param file the remote file to write
 * @param status transfer status carrying length, checksum, part number and optional upload URL
 * @param callback connection prompt callback (unused here)
 * @return output stream whose close finalizes the upload when this call started it
 * @throws BackgroundException on protocol or I/O failure
 */
@Override
public HttpResponseOutputStream<FileEntity> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final String uploadUri;
    FileUploadPartEntity uploadPartEntity = null;
    if(StringUtils.isBlank(status.getUrl())) {
        // No pre-assigned URL: start a new upload and use its part URI.
        uploadPartEntity = new BrickUploadFeature(session, this).startUpload(file);
        uploadUri = uploadPartEntity.getUploadUri();
    }
    else {
        uploadUri = status.getUrl();
    }
    final HttpResponseOutputStream<FileEntity> stream = this.write(file, status, new DelayedHttpEntityCallable<FileEntity>(file) {
        @Override
        public FileEntity call(final HttpEntity entity) throws BackgroundException {
            try {
                final HttpPut request = new HttpPut(uploadUri);
                request.setEntity(entity);
                request.setHeader(HttpHeaders.CONTENT_TYPE, MimeTypeService.DEFAULT_CONTENT_TYPE);
                final HttpResponse response = session.getClient().execute(request);
                // Validate response
                try {
                    switch(response.getStatusLine().getStatusCode()) {
                        case HttpStatus.SC_OK:
                            if(log.isInfoEnabled()) {
                                log.info(String.format("Received response %s for part number %d", response, status.getPart()));
                            }
                            // Upload complete
                            if(response.containsHeader("ETag")) {
                                if(file.getType().contains(Path.Type.encrypted)) {
                                    // Checksum of ciphertext would never match the plaintext checksum.
                                    log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
                                }
                                else {
                                    if(HashAlgorithm.md5.equals(status.getChecksum().algorithm)) {
                                        // Compare local MD5 with the (unquoted) ETag returned by the server.
                                        final Checksum etag = Checksum.parse(StringUtils.remove(response.getFirstHeader("ETag").getValue(), '"'));
                                        if(!status.getChecksum().equals(etag)) {
                                            throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
                                                MessageFormat.format("Mismatch between {0} hash {1} of uploaded data and ETag {2} returned by the server",
                                                    etag.algorithm.toString(), status.getChecksum().hash, etag.hash));
                                        }
                                    }
                                }
                            }
                            else {
                                if(log.isDebugEnabled()) {
                                    log.debug("No ETag header in response available");
                                }
                            }
                            return null;
                        default:
                            // Buffer the error body so it survives consume(), then map to a typed failure.
                            EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
                            throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed",
                                new HttpResponseException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
                    }
                }
                finally {
                    EntityUtils.consume(response.getEntity());
                }
            }
            catch(HttpResponseException e) {
                throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
            }
            catch(IOException e) {
                throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
            }
        }

        @Override
        public long getContentLength() {
            return status.getLength();
        }
    });
    if(StringUtils.isBlank(status.getUrl())) {
        // We started this upload ourselves: wrap the stream so close() completes it.
        final String ref = uploadPartEntity.getRef();
        return new HttpResponseOutputStream<FileEntity>(new ProxyOutputStream(stream),
            new BrickAttributesFinderFeature(session), status) {
            private final AtomicBoolean close = new AtomicBoolean();

            @Override
            public FileEntity getStatus() throws BackgroundException {
                return stream.getStatus();
            }

            @Override
            public void close() throws IOException {
                if(close.get()) {
                    log.warn(String.format("Skip double close of stream %s", this));
                    return;
                }
                super.close();
                try {
                    new BrickUploadFeature(session, BrickWriteFeature.this)
                        .completeUpload(file, ref, status, Collections.singletonList(status));
                }
                catch(BackgroundException e) {
                    throw new IOException(e.getMessage(), e);
                }
                finally {
                    close.set(true);
                }
            }
        };
    }
    return stream;
}
// Writing a zero-byte file must succeed end-to-end: the file exists afterwards,
// reports size 0, and reads back as empty.
@Test
public void testWriteZeroLength() throws Exception {
    final BrickWriteFeature feature = new BrickWriteFeature(session);
    final Path container = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final byte[] content = RandomUtils.nextBytes(0);
    final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final TransferStatus status = new TransferStatus().withLength(content.length);
    final HttpResponseOutputStream<FileEntity> out = feature.write(file, status, new DisabledConnectionCallback());
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    assertEquals(content.length, IOUtils.copyLarge(in, out));
    in.close();
    out.close();
    assertTrue(new DefaultFindFeature(session).find(file));
    final PathAttributes attributes = new BrickAttributesFinderFeature(session).find(file);
    assertEquals(content.length, attributes.getSize());
    // Read the file back and compare byte-for-byte (trivially empty here).
    final byte[] compare = new byte[content.length];
    final InputStream stream = new BrickReadFeature(session).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    // Cleanup.
    new BrickDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns the wrapped view this target was constructed with.
 *
 * @return the non-null wrapped view
 */
@NonNull
public final T getView() {
    return view;
}
// getView() must return the exact view instance the target wraps.
@Test
public void testReturnsWrappedView() {
    assertEquals(view, target.getView());
}
/**
 * Resolves the access privilege for a client address, delegating to the string-based
 * overload with the address's textual IP and canonical hostname.
 *
 * <p>NOTE(review): getCanonicalHostName() may trigger a reverse DNS lookup — confirm
 * this is acceptable on the calling path.
 *
 * @param addr the client address
 * @return the access privilege granted to this client
 */
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
    return getAccessPrivilege(addr.getHostAddress(), addr.getCanonicalHostName());
}
// A regex export spec grants READ_ONLY to any matching host; the second lookup for the
// same address exercises the cache path. (Note the '.' in the pattern is unescaped —
// it matches any character, which is irrelevant for these fixtures.)
@Test
public void testRegexHostRO() {
    NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "[a-z]+.b.com");
    Assert.assertEquals(AccessPrivilege.READ_ONLY,
        matcher.getAccessPrivilege(address1, hostname1));
    // address1 will hit the cache
    Assert.assertEquals(AccessPrivilege.READ_ONLY,
        matcher.getAccessPrivilege(address1, hostname2));
}
/**
 * Returns the union of all sketches accumulated so far.
 *
 * @return the unioned sketch; never null
 */
@Nonnull
@Override
public Sketch getResult() {
    return unionAll();
}
// With exactly one sketch accumulated, getResult() must reproduce that sketch's
// estimate unchanged (threshold 2 means no intermediate union was triggered).
@Test
public void testAccumulatorWithSingleSketch() {
    UpdateSketch input = Sketches.updateSketchBuilder().build();
    IntStream.range(0, 1000).forEach(input::update);
    Sketch sketch = input.compact();

    ThetaSketchAccumulator accumulator = new ThetaSketchAccumulator(_setOperationBuilder, 2);
    accumulator.apply(sketch);

    Assert.assertFalse(accumulator.isEmpty());
    Assert.assertEquals(accumulator.getResult().getEstimate(), sketch.getEstimate());
}
/**
 * Identifies the asymmetric algorithm implemented by this encryptor.
 *
 * @return the constant algorithm name "EC"
 */
@Override
public String asymmetricEncryptType() {
    return "EC";
}
// The EC encryptor must report "EC" as its asymmetric algorithm type.
@Test
public void asymmetricEncryptType() {
    SAECEncrypt ecEncrypt = new SAECEncrypt();
    Assert.assertEquals("EC", ecEncrypt.asymmetricEncryptType());
}
/**
 * Renders the given AST node as formatted SQL text, without any trailing newline
 * characters.
 *
 * @param root the AST node to format
 * @return the formatted SQL string
 */
public static String formatSql(final AstNode root) {
  final StringBuilder sql = new StringBuilder();
  new Formatter(sql).process(root, 0);
  // Trim trailing newlines (equivalent to StringUtils.stripEnd(text, "\n")).
  int end = sql.length();
  while (end > 0 && sql.charAt(end - 1) == '\n') {
    end--;
  }
  return sql.substring(0, end);
}
// A CSAS with a windowed aggregation and EMIT FINAL must round-trip through the
// formatter with the window clause and suppression preserved.
@Test
public void shouldFormatSuppression() {
    // Given:
    final String statementString = "CREATE STREAM S AS SELECT ITEMID, COUNT(*) FROM ORDERS WINDOW TUMBLING (SIZE 7 DAYS, GRACE PERIOD 1 DAY) GROUP BY ITEMID EMIT FINAL;";
    final Statement statement = parseSingle(statementString);
    final String result = SqlFormatter.formatSql(statement);

    assertThat(result, is("CREATE STREAM S AS SELECT\n"
        + "  ITEMID,\n"
        + "  COUNT(*)\n"
        + "FROM ORDERS ORDERS\n"
        + "WINDOW TUMBLING ( SIZE 7 DAYS , GRACE PERIOD 1 DAYS ) \n"
        + "GROUP BY ITEMID\n"
        + "EMIT FINAL"));
}
/**
 * Processes one MySQL command from the client connection: resets per-command state,
 * reads a single packet, dispatches it, finalizes the response, and parks the
 * connection back in COM_SLEEP.
 *
 * @throws IOException if reading from the channel fails
 */
public void processOnce() throws IOException {
    // set status of query to OK.
    ctx.getState().reset();
    executor = null;

    // reset sequence id of MySQL protocol
    final MysqlChannel channel = ctx.getMysqlChannel();
    channel.setSequenceId(0);

    // read packet from channel
    try {
        packetBuf = channel.fetchOnePacket();
        if (packetBuf == null) {
            throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
        }
    } catch (AsynchronousCloseException e) {
        // when this happened, timeout checker close this channel
        // killed flag in ctx has been already set, just return
        return;
    }

    // dispatch
    dispatch();
    // finalize
    finalizeCommand();

    ctx.setCommand(MysqlCommand.COM_SLEEP);
}
// A COM_PING packet must be answered with an OK packet and leave the connection alive.
@Test
public void testPing() throws IOException {
    ConnectContext ctx = initMockContext(mockChannel(pingPacket), GlobalStateMgr.getCurrentState());

    ConnectProcessor processor = new ConnectProcessor(ctx);
    processor.processOnce();
    Assert.assertEquals(MysqlCommand.COM_PING, myContext.getCommand());
    Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlOkPacket);
    Assert.assertFalse(myContext.isKilled());
}
/**
 * Parses forwarded/non-forwarded/read-field annotations into the given semantic
 * properties; convenience overload that disables the skip-incompatible-types flag.
 *
 * @param result the properties object populated in place
 * @param forwarded forwarded-field expressions, or null
 * @param nonForwarded non-forwarded-field expressions, or null
 * @param readSet read-field expressions, or null
 * @param inType input type of the operator
 * @param outType output type of the operator
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
// Arrow notation in a single semicolon-separated string ("f0->f0;f1->f2") must map
// source fields to target fields; bare-index form ("0->0;1->2") must parse identically.
@Test
void testForwardedWithArrowOneString() {
    String[] forwardedFields = {"f0->f0;f1->f2"};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, threeIntTupleType, fiveIntTupleType);

    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);

    // Same mapping expressed with bare field indices.
    forwardedFields[0] = "0->0;1->2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, threeIntTupleType, fiveIntTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);
}
/**
 * Scores password strength as a non-negative level: points are added for
 * character-class variety (digits, lower, upper, other) scaled by length, and
 * subtracted for degenerate patterns (keyboard rows, repeats, dates,
 * dictionary words, short length). Result is clamped to 0.
 *
 * @param passwd password to score; must not be null
 * @throws IllegalArgumentException when passwd is null
 */
public static int check(String passwd) {
    if (null == passwd) {
        throw new IllegalArgumentException("password is empty");
    }
    int len = passwd.length();
    int level = 0;

    // increase points
    if (countLetter(passwd, CHAR_TYPE.NUM) > 0) {
        level++;
    }
    if (countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0) {
        level++;
    }
    if (len > 4 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0) {
        level++;
    }
    if (len > 6 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) {
        level++;
    }
    // any two character classes present (len gate only applies to the first clause
    // because && binds tighter than || — NOTE(review): possibly unintended precedence)
    if (len > 4 && countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0
            || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0
            || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0
            || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0
            || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0
            || countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) {
        level++;
    }
    // any three character classes present
    if (len > 6 && countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0
            || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0
            || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0
            || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) {
        level++;
    }
    // all four character classes present
    if (len > 8 && countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0
            && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) {
        level++;
    }
    // two classes, each well represented
    if (len > 6 && countLetter(passwd, CHAR_TYPE.NUM) >= 3 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 3
            || countLetter(passwd, CHAR_TYPE.NUM) >= 3 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 3
            || countLetter(passwd, CHAR_TYPE.NUM) >= 3 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2
            || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 3 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 3
            || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 3 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2
            || countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 3 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2) {
        level++;
    }
    // three classes, each well represented
    if (len > 8 && countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2
            || countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2
            || countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2
            || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2) {
        level++;
    }
    // all four classes, each well represented
    if (len > 10 && countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2
            && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2) {
        level++;
    }
    // bonus for many symbol characters
    if (countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 3) {
        level++;
    }
    if (countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 6) {
        level++;
    }
    // bonus for sheer length
    if (len > 12) {
        level++;
        if (len >= 16) {
            level++;
        }
    }

    // decrease points
    // NOTE(review): indexOf(...) > 0 misses substrings starting at index 0
    // (e.g. "abcdef", "qwerty", "0123456") — likely should be >= 0; confirm intent.
    if ("abcdefghijklmnopqrstuvwxyz".indexOf(passwd) > 0 || "ABCDEFGHIJKLMNOPQRSTUVWXYZ".indexOf(passwd) > 0) {
        level--;
    }
    if ("qwertyuiop".indexOf(passwd) > 0 || "asdfghjkl".indexOf(passwd) > 0 || "zxcvbnm".indexOf(passwd) > 0) {
        level--;
    }
    if (StrUtil.isNumeric(passwd) && ("01234567890".indexOf(passwd) > 0 || "09876543210".indexOf(passwd) > 0)) {
        level--;
    }
    // password made of a single character class
    if (countLetter(passwd, CHAR_TYPE.NUM) == len || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) == len
            || countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) == len) {
        level--;
    }
    if (len % 2 == 0) { // aaabbb
        String part1 = passwd.substring(0, len / 2);
        String part2 = passwd.substring(len / 2);
        if (part1.equals(part2)) {
            level--;
        }
        if (StrUtil.isCharEquals(part1) && StrUtil.isCharEquals(part2)) {
            level--;
        }
    }
    if (len % 3 == 0) { // ababab
        String part1 = passwd.substring(0, len / 3);
        String part2 = passwd.substring(len / 3, len / 3 * 2);
        String part3 = passwd.substring(len / 3 * 2);
        if (part1.equals(part2) && part2.equals(part3)) {
            level--;
        }
    }
    if (StrUtil.isNumeric(passwd) && len >= 6 && len <= 8) { // 19881010 or 881010
        int year = 0;
        if (len == 8 || len == 6) {
            year = Integer.parseInt(passwd.substring(0, len - 4));
        }
        // NOTE(review): for 6-digit input year is 2 digits (e.g. 88), which can
        // never satisfy year >= 1950 below, so "881010"-style dates are never
        // penalized despite the comment — confirm intent.
        int size = sizeOfInt(year);
        int month = Integer.parseInt(passwd.substring(size, size + 2));
        int day = Integer.parseInt(passwd.substring(size + 2, len));
        if (year >= 1950 && year < 2050 && month >= 1 && month <= 12 && day >= 1 && day <= 31) {
            level--;
        }
    }
    // dictionary hit: exact match or passwd contained in a dictionary word
    for (String s : DICTIONARY) {
        if (passwd.equals(s) || s.contains(passwd)) {
            level--;
            break;
        }
    }
    // short passwords are penalized; <= 3 chars scores 0 outright
    if (len <= 6) {
        level--;
        if (len <= 4) {
            level--;
            if (len <= 3) {
                level = 0;
            }
        }
    }
    // a single repeated character always scores 0
    if (StrUtil.isCharEquals(passwd)) {
        level = 0;
    }
    if (level < 0) {
        level = 0;
    }
    return level;
}
// A long run of a single repeated digit is degenerate and must score 0.
@Test
public void strengthNumberTest(){
    String passwd = "9999999999999";
    assertEquals(0, PasswdStrength.check(passwd));
}
/**
 * Returns how many suppression rules are currently registered on the engine,
 * or 0 when no rule list has been attached under {@code SUPPRESSION_OBJECT_KEY}.
 */
public static int getRuleCount(Engine engine) {
    if (!engine.hasObject(SUPPRESSION_OBJECT_KEY)) {
        return 0;
    }
    @SuppressWarnings("unchecked")
    final List<SuppressionRule> rules = (List<SuppressionRule>) engine.getObject(SUPPRESSION_OBJECT_KEY);
    return rules.size();
}
// Rules from two configured suppression files plus the bundled core file must
// all be counted by getRuleCount after prepare().
@Test
public void testGetRulesFromMultipleSuppressionFiles() throws Exception {
    final int rulesInCoreFile = getNumberOfRulesLoadedInCoreFile();

    // GIVEN suppression rules from one file
    final int rulesInFirstFile = getNumberOfRulesLoadedFromPath(SUPPRESSIONS_FILE) - rulesInCoreFile;

    // AND suppression rules from another file
    final int rulesInSecondFile = getNumberOfRulesLoadedFromPath(OTHER_SUPPRESSIONS_FILE) - rulesInCoreFile;

    // WHEN initializing with both suppression files
    final String[] suppressionFiles = {SUPPRESSIONS_FILE, OTHER_SUPPRESSIONS_FILE};
    getSettings().setArrayIfNotEmpty(KEYS.SUPPRESSION_FILE, suppressionFiles);
    instance.initialize(getSettings());
    Engine engine = new Engine(getSettings());
    instance.prepare(engine);

    // THEN rules from both files were loaded
    final int expectedSize = rulesInFirstFile + rulesInSecondFile + rulesInCoreFile;
    assertThat("Expected suppressions from both files", instance.getRuleCount(engine), is(expectedSize));
}
// Parses a composite URI of the form scheme:(comp1,comp2,...)#fragment into
// its CompositeData parts; component parsing is delegated to the
// three-argument overload.
public static CompositeData parseComposite(URI uri) throws URISyntaxException {
    CompositeData rc = new CompositeData();
    rc.scheme = uri.getScheme();
    // strip an optional leading "//" so "scheme://(...)" and "scheme:(...)" parse alike
    String ssp = stripPrefix(uri.getRawSchemeSpecificPart().trim(), "//").trim();

    parseComposite(uri, rc, ssp);

    rc.fragment = uri.getFragment();
    return rc;
}
// A nested composite ("part2://(...)") counts as a single component of the
// outer composite, so this URI has exactly two components.
@Test
public void testComposite() throws Exception {
    URI uri = new URI("test:(part1://host,part2://(sub1://part,sube2:part))");
    CompositeData data = URISupport.parseComposite(uri);
    assertEquals(2, data.getComponents().length);
}
// Sends a CreatePayPalOneTimePayment GraphQL mutation to Braintree and
// returns a future that completes with the mutation payload; HTTP/GraphQL
// failures propagate through the future via assertSuccessAndExtractData.
CompletableFuture<CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment> createPayPalOneTimePayment(
        final BigDecimal amount,
        final String currency,
        final String returnUrl,
        final String cancelUrl,
        final String locale) {
    final CreatePayPalOneTimePaymentInput input =
        buildCreatePayPalOneTimePaymentInput(amount, currency, returnUrl, cancelUrl, locale);
    final CreatePayPalOneTimePaymentMutation mutation = new CreatePayPalOneTimePaymentMutation(input);
    final HttpRequest request = buildRequest(mutation);

    return httpClient.sendAsync(request, HttpResponse.BodyHandlers.ofString())
        .thenApply(httpResponse -> {
            // IntelliJ users: type parameters error "no instance of type variable exists so that Data conforms to Data"
            // is not accurate; this might be fixed in Kotlin 1.8: https://youtrack.jetbrains.com/issue/KTIJ-21905/
            final CreatePayPalOneTimePaymentMutation.Data data = assertSuccessAndExtractData(httpResponse, mutation);
            return data.createPayPalOneTimePayment;
        });
}
// A 500 response (no retry/error headers) must complete the future
// exceptionally, with ServiceUnavailableException as the cause.
@Test
void createPayPalOneTimePaymentHttpError() {
    final HttpResponse<Object> response = mock(HttpResponse.class);
    when(httpClient.sendAsync(any(), any()))
        .thenReturn(CompletableFuture.completedFuture(response));
    when(response.statusCode())
        .thenReturn(500);
    final HttpHeaders httpheaders = mock(HttpHeaders.class);
    when(httpheaders.firstValue(any())).thenReturn(Optional.empty());
    when(response.headers())
        .thenReturn(httpheaders);

    final CompletableFuture<CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment> future =
        braintreeGraphqlClient.createPayPalOneTimePayment(BigDecimal.ONE, CURRENCY, RETURN_URL, CANCEL_URL, LOCALE);

    assertTimeoutPreemptively(Duration.ofSeconds(3), () -> {
        final ExecutionException e = assertThrows(ExecutionException.class, future::get);
        assertTrue(e.getCause() instanceof ServiceUnavailableException);
    });
}
// Pages over SMS send logs; filter construction and pagination are delegated
// entirely to the mapper.
@Override
public PageResult<SmsLogDO> getSmsLogPage(SmsLogPageReqVO pageReqVO) {
    return smsLogMapper.selectPage(pageReqVO);
}
// The paged query must return only the row matching every filter; seven
// near-miss rows (one field off each) are inserted as negatives.
@Test
public void testGetSmsLogPage() {
    // mock data
    SmsLogDO dbSmsLog = randomSmsLogDO(o -> { // the row the query should find
        o.setChannelId(1L);
        o.setTemplateId(10L);
        o.setMobile("15601691300");
        o.setSendStatus(SmsSendStatusEnum.INIT.getStatus());
        o.setSendTime(buildTime(2020, 11, 11));
        o.setReceiveStatus(SmsReceiveStatusEnum.INIT.getStatus());
        o.setReceiveTime(buildTime(2021, 11, 11));
    });
    smsLogMapper.insert(dbSmsLog);
    // negative: channelId mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setChannelId(2L)));
    // negative: templateId mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setTemplateId(20L)));
    // negative: mobile mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setMobile("18818260999")));
    // negative: sendStatus mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setSendStatus(SmsSendStatusEnum.IGNORE.getStatus())));
    // negative: sendTime mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setSendTime(buildTime(2020, 12, 12))));
    // negative: receiveStatus mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setReceiveStatus(SmsReceiveStatusEnum.SUCCESS.getStatus())));
    // negative: receiveTime mismatch
    smsLogMapper.insert(cloneIgnoreId(dbSmsLog, o -> o.setReceiveTime(buildTime(2021, 12, 12))));
    // prepare request parameters
    SmsLogPageReqVO reqVO = new SmsLogPageReqVO();
    reqVO.setChannelId(1L);
    reqVO.setTemplateId(10L);
    reqVO.setMobile("156");
    reqVO.setSendStatus(SmsSendStatusEnum.INIT.getStatus());
    reqVO.setSendTime(buildBetweenTime(2020, 11, 1, 2020, 11, 30));
    reqVO.setReceiveStatus(SmsReceiveStatusEnum.INIT.getStatus());
    reqVO.setReceiveTime(buildBetweenTime(2021, 11, 1, 2021, 11, 30));

    // invoke
    PageResult<SmsLogDO> pageResult = smsLogService.getSmsLogPage(reqVO);
    // assert
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbSmsLog, pageResult.getList().get(0));
}
/**
 * Parses {@code num} as a signed decimal long.
 *
 * @param num         text to parse; may be {@code null}
 * @param defaultLong value returned when {@code num} is {@code null}, empty,
 *                    or not a valid long
 * @return the parsed value, or {@code defaultLong} on any parse failure
 */
public static long parseLong(String num, long defaultLong) {
    if (num == null) {
        return defaultLong;
    }
    try {
        return Long.parseLong(num);
    } catch (NumberFormatException e) {
        // catch only the exception Long.parseLong can actually throw for a
        // non-null argument, instead of the original blanket Exception
        return defaultLong;
    }
}
// parseLong falls back to the default for empty, non-numeric, and null input,
// and parses valid text.
@Test
public void testParseLong() {
    Assert.assertEquals(CommonUtils.parseLong("", 123L), 123L);
    Assert.assertEquals(CommonUtils.parseLong("xxx", 123L), 123L);
    Assert.assertEquals(CommonUtils.parseLong(null, 123L), 123L);
    Assert.assertEquals(CommonUtils.parseLong("12345", 123L), 12345L);
}
/**
 * Validates a header value; -1 indicates the value is acceptable, while a
 * non-negative result flags an invalid value (exact semantics defined by the
 * verify helpers). Empty values are always accepted. An AsciiString takes the
 * optimized byte-wise path.
 */
public static int validateValidHeaderValue(CharSequence value) {
    if (value.length() == 0) {
        return -1;
    }
    return value instanceof AsciiString
            ? verifyValidHeaderValueAsciiString((AsciiString) value)
            : verifyValidHeaderValueCharSequence(value);
}
@DisabledForJreRange(max = JRE.JAVA_17) // This test is much too slow on older Java versions. @Test void headerValueValidationMustRejectAllValuesRejectedByOldAlgorithm() { byte[] array = new byte[4]; final ByteBuffer buffer = ByteBuffer.wrap(array); final AsciiString asciiString = new AsciiString(buffer, false); CharSequence charSequence = asCharSequence(asciiString); int i = Integer.MIN_VALUE; Supplier<String> failureMessageSupplier = new Supplier<String>() { @Override public String get() { return "validation mismatch on string '" + asciiString + "', iteration " + buffer.getInt(0); } }; do { buffer.putInt(0, i); try { oldHeaderValueValidationAlgorithm(asciiString); } catch (IllegalArgumentException ignore) { assertNotEquals(-1, validateValidHeaderValue(asciiString), failureMessageSupplier); assertNotEquals(-1, validateValidHeaderValue(charSequence), failureMessageSupplier); } i++; } while (i != Integer.MIN_VALUE); }
/**
 * Returns the number of top-level elements in the given JSON array string,
 * or null when the input is null, unparsable, or not a JSON array.
 */
@Udf
public Integer length(@UdfParameter final String jsonArray) {
    if (jsonArray == null) {
        return null;
    }

    final JsonNode parsed = UdfJsonMapper.parseJson(jsonArray);
    final boolean countable = !parsed.isMissingNode() && parsed.isArray();
    return countable ? parsed.size() : null;
}
// Nested arrays count as single top-level elements: [1, [1, [2]], 3] has length 3.
@Test
public void shouldReturnNestedArrayLength() {
    // When:
    final Integer result = udf.length("[1, [1, [2]], 3]");

    // Then:
    assertEquals(Integer.valueOf(3), result);
}
// Validates a card's polymorphic info: PCA version must be exactly 1, and the
// flag bits for randomized PIP (0x20) and compressed encoding (0x04) must both
// be set. Any violation is logged and raised as a generic ClientException.
// NOTE(review): the method name misspells "Polymorphic"; renaming would break
// external callers, so it is left as-is.
public static void validatePolymorhpicInfo(PolymorphicInfo info) {
    if (info.getPcaVersion() != 1) {
        logger.error("Unsupported PCA version {}", info.getPcaVersion());
        throw new ClientException("Polymorphic info is not correct");
    }

    int polymorphicFlags = info.getFlags().intValue();
    boolean randomizedPip = (polymorphicFlags & 32) != 0;
    boolean compressedEncoding = (polymorphicFlags & 4) != 0;
    if (!randomizedPip || !compressedEncoding) {
        logger.error("Polymorphic flags incorrect randomizedPip: {} compressedEncoding: {}", randomizedPip, compressedEncoding);
        throw new ClientException("Polymorphic info is not correct");
    }
}
// A PCA version other than 1 (decoded from the fixture bytes) must be rejected
// with the generic error message.
// NOTE(review): the hex literal looks redacted/placeholder ("SSS..."); Hex.decode
// would fail on non-hex characters — confirm the real fixture value.
@Test
public void invalidPolymorphicInfoVersion() {
    final PolymorphicInfo info = mapper.read(
        Hex.decode("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"),
        PolymorphicInfo.class);
    ClientException thrown = assertThrows(ClientException.class, () -> CardValidations.validatePolymorhpicInfo(info));
    assertEquals("Polymorphic info is not correct", thrown.getMessage());
}
/**
 * Deletes the given file if it exists; any IOException is rethrown unchecked
 * via LangUtil.rethrowUnchecked.
 */
public static void deleteIfExists(final File file) {
    final Path target = file.toPath();
    try {
        Files.deleteIfExists(target);
    } catch (final IOException ioEx) {
        LangUtil.rethrowUnchecked(ioEx);
    }
}
// deleteIfExists delegates to Files.deleteIfExists, so attempting to delete a
// non-empty directory must surface as DirectoryNotEmptyException.
@Test
void deleteIfExistsFailsOnNonEmptyDirectory() throws IOException {
    final Path dir = tempDir.resolve("dir");
    Files.createDirectory(dir);
    Files.createFile(dir.resolve("file.txt"));

    assertThrows(DirectoryNotEmptyException.class, () -> IoUtil.deleteIfExists(dir.toFile()));
}
/**
 * Human-readable size: picks the largest unit (Bytes/KB/MB/GB) that yields a
 * non-zero integer quotient; remainders are truncated, not rounded.
 */
@Override
public String toString() {
    final long kb = size / KB_COEFFICIENT;
    if (kb == 0) {
        return size + " Bytes";
    }

    final long mb = size / MB_COEFFICIENT;
    if (mb == 0) {
        return kb + " KB";
    }

    final long gb = size / GB_COEFFICIENT;
    return gb == 0 ? mb + " MB" : gb + " GB";
}
// Unit boundaries truncate: remainders below the next unit are dropped
// (e.g. 8 KB + 3 bytes still renders as "8 KB").
@Test
public void testToString() {
    {
        FileSize fs = new FileSize(8);
        assertEquals("8 Bytes", fs.toString());
    }
    {
        FileSize fs = new FileSize(8 * 1024 + 3);
        assertEquals("8 KB", fs.toString());
    }
    {
        FileSize fs = new FileSize(8 * 1024 * 1024 + 3 * 1024);
        assertEquals("8 MB", fs.toString());
    }
    {
        FileSize fs = new FileSize(8 * 1024 * 1024 * 1024L);
        assertEquals("8 GB", fs.toString());
    }
}
// Dagger provider for TcsClient. When none of the three callback settings is
// configured, a deliberately disabled client is returned; partial
// configuration is a hard error, and address/port are validated.
@Provides
TcsClient providesTcsClient(
        @Nullable @CallbackAddress String callbackAddress,
        @Nullable @CallbackPort Integer callbackPort,
        @Nullable @CallbackPollingUri String pollingUri,
        HttpClient httpClient) {
    // when all tcs config are not set, we provide an invalid {@link TcsClient}
    // so that {@link TcsClient#isCallbackServerEnabled} returns false.
    if (callbackAddress == null && callbackPort == null && pollingUri == null) {
        return new TcsClient("", 0, "", checkNotNull(httpClient));
    }

    // having only some of the settings configured is treated as a bug
    checkNotNull(callbackAddress);
    checkNotNull(callbackPort);
    checkNotNull(pollingUri);
    // address may be a literal IP or a valid domain name
    checkArgument(
        InetAddresses.isInetAddress(callbackAddress) || InternetDomainName.isValid(callbackAddress),
        "Invalid callback address specified");
    checkArgument(callbackPort > 0 && callbackPort < 65536, "Invalid port number specified");
    return new TcsClient(callbackAddress, callbackPort, pollingUri, checkNotNull(httpClient));
}
// A fully valid configuration (domain, port, polling URI) must yield a client
// with the callback server enabled.
@Test
public void providesTcsClient_withGoodConfig_returnsValidTcsClient() {
    TcsClient client = module.providesTcsClient(DOMAIN_1, PORT_1, POLLING_URI_1, httpClient);
    assertTrue(client.isCallbackServerEnabled());
}
/**
 * Null-safe string equality: two nulls are equal, a null never equals a
 * non-null, otherwise delegates to String.equals (case-sensitive).
 */
public static boolean equals(String a, String b) {
    return (a == null) ? (b == null) : a.equals(b);
}
// Null-safe equality: null==null, null!=non-null, otherwise case-sensitive.
@Test
public void testEquals() {
    Assert.assertFalse(StringUtils.equals(null, ""));
    Assert.assertFalse(StringUtils.equals("foo", "bar"));
    Assert.assertFalse(StringUtils.equals("foo", "FOO"));

    Assert.assertTrue(StringUtils.equals(null, null));
    Assert.assertTrue(StringUtils.equals("foo", "foo"));
}
/**
 * Generates a random password of the configured length: the first character is
 * drawn from firstCharacterAlphabet, all remaining characters from alphabet.
 */
public String generate() {
    final StringBuilder password = new StringBuilder(length);
    final int firstIndex = rng.nextInt(firstCharacterAlphabet.length());
    password.append(firstCharacterAlphabet.charAt(firstIndex));
    for (int pos = 1; pos < length; pos++) {
        password.append(alphabet.charAt(rng.nextInt(alphabet.length())));
    }
    return password.toString();
}
// Every character of a generated password must come from the configured alphabets.
@Test
public void alphabet() {
    PasswordGenerator generator = new PasswordGenerator(10, "ab", "ab");
    assertThat(generator.generate().matches("[ab]{10}"), is(true));
}
// Builds a byte sequence of `size` bytes whose first `prefixBits` bits all
// equal val's bit (val must be 0x00 or 0xff); the remaining bits are the
// complement. Handles a non-byte-aligned boundary byte explicitly.
static ImmutableByteSequence prefix(int size, long prefixBits, byte val) {
    checkArgument(val == 0 || val == (byte) 0xff, "Val must be 0 or 0xff");
    byte[] bytes = new byte[size];
    // whole bytes fully covered by the prefix
    int prefixBytes = (int) (prefixBits / Byte.SIZE);
    Arrays.fill(bytes, 0, prefixBytes, val);
    Arrays.fill(bytes, prefixBytes, bytes.length, (byte) ~val);
    // partially covered boundary byte, when the prefix is not byte-aligned
    int partialBits = (int) (prefixBits % Byte.SIZE);
    if (partialBits != 0) {
        bytes[prefixBytes] = val == 0
                ? (byte) (0xff >> partialBits)
                : (byte) (0xff << Byte.SIZE - partialBits);
    }
    return new ImmutableByteSequence(ByteBuffer.wrap(bytes));
}
// val must be 0x00 or 0xff; any other value (here 0x7) must trigger
// IllegalArgumentException.
@Test
public void testBadPrefixVal() {
    thrown.expect(IllegalArgumentException.class);
    thrown.reportMissingExceptionWithMessage(
        "Expect IllegalArgumentException due to val = 0x7");
    ImmutableByteSequence.prefix(5, 10, (byte) 0x7);
}
// Moves/renames a file via the Storegate "files/{id}/move" endpoint
// (mode=1 requests overwrite of an existing target). On HTTP 204 the file id
// cache is updated (source invalidated, target mapped) and the renamed path
// is returned with the original attributes.
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final StoregateApiClient client = session.getClient();
        final MoveFileRequest move = new MoveFileRequest()
            .name(renamed.getName())
            .parentID(fileid.getFileId(renamed.getParent()))
            .mode(1); // Overwrite
        final HttpEntityEnclosingRequestBase request;
        request = new HttpPost(String.format("%s/v4.2/files/%s/move", client.getBasePath(), fileid.getFileId(file)));
        if(status.getLockId() != null) {
            // propagate the client-held lock to the server
            request.addHeader("X-Lock-Id", status.getLockId().toString());
        }
        request.setEntity(new StringEntity(new JSON().getContext(move.getClass()).writeValueAsString(move),
            ContentType.create("application/json", StandardCharsets.UTF_8.name())));
        request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
        final HttpResponse response = client.getClient().execute(request);
        try {
            switch(response.getStatusLine().getStatusCode()) {
                case HttpStatus.SC_NO_CONTENT:
                    // moving keeps the file's attributes; re-key the id cache to the new path
                    final PathAttributes attr = new PathAttributes(file.attributes());
                    fileid.cache(file, null);
                    fileid.cache(renamed, attr.getFileId());
                    return renamed.withAttributes(attr);
                default:
                    throw new StoregateExceptionMappingService(fileid).map("Cannot rename {0}",
                        new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
            }
        }
        finally {
            EntityUtils.consume(response.getEntity());
        }
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
// Moving a file between folders must relocate it server-side (source gone,
// target present) without falling back to the Copy feature.
@Test
public void testMoveToDifferentFolder() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path folder1 = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
            EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path folder2 = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
            EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path test = new Path(folder1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new StoregateTouchFeature(session, nodeid).touch(test, new TransferStatus());
    final Path target = new Path(folder2, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new StoregateMoveFeature(session, nodeid).move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertFalse(new DefaultFindFeature(session).find(test));
    assertTrue(new DefaultFindFeature(session).find(target));
    // move must not be implemented as copy+delete
    assertEquals(0, session.getMetrics().get(Copy.class));
    new StoregateDeleteFeature(session, nodeid).delete(Arrays.asList(folder1, folder2), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Ensures {@code object} is an instance of {@code type} (null is rejected,
 * since it is an instance of nothing) and returns it cast to E.
 *
 * @throws IllegalArgumentException with {@code errorMessage} when the check fails
 */
public static <E> E checkInstanceOf(Class<E> type, Object object, String errorMessage) {
    isNotNull(type, "type");
    if (type.isInstance(object)) {
        return (E) object;
    }
    throw new IllegalArgumentException(errorMessage);
}
// null is not an instance of any class, so the check must throw.
@Test(expected = IllegalArgumentException.class)
public void test_checkInstanceOf_withNullObject() {
    checkInstanceOf(Number.class, null, "argumentName");
}
// Renders a comment as HTML: escapes the text and replaces every regex match
// with a link built from the configured link template. With no regex/link
// configured the text is only escaped; with an uncompilable regex the raw
// text is returned unchanged.
@Override
public String render(String text) {
    if (StringUtils.isBlank(text)) {
        return "";
    }
    if (regex.isEmpty() || link.isEmpty()) {
        Comment comment = new Comment();
        comment.escapeAndAdd(text);
        return comment.render();
    }
    try {
        Matcher matcher = Pattern.compile(regex).matcher(text);
        int start = 0;
        Comment comment = new Comment();
        while (hasMatch(matcher)) {
            // text before the match is escaped; the match itself becomes a link
            comment.escapeAndAdd(text.substring(start, matcher.start()));
            comment.add(dynamicLink(matcher));
            start = matcher.end();
        }
        comment.escapeAndAdd(text.substring(start));
        return comment.render();
    } catch (PatternSyntaxException e) {
        LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
    }
    // NOTE(review): on a bad regex the raw text is returned unescaped — confirm intended
    return text;
}
// Only the regex match ("evo-1994") becomes a link; an equal plain substring
// ("1994") elsewhere in the text must remain untouched.
@Test
public void shouldReplaceBasedOnRegexInsteadOfPureStringReplacement() throws Exception {
    String link = "http://mingle05/projects/cce/cards/${ID}";
    String regex = "evo-(\\d+)";
    trackingTool = new DefaultCommentRenderer(link, regex);

    String result = trackingTool.render("Replace evo-1994. Don't replace 1994");
    assertThat(result, containsString(
        "<a href=\"http://mingle05/projects/cce/cards/1994\" "
            + "target=\"story_tracker\">evo-1994</a>"));
    assertThat(result, containsString("Don't replace 1994"));
}
// Blocking acquire: waits indefinitely for a resource (no timeout unit needed).
@Override
public T acquire() {
    return acquire(WAIT_INDEFINITELY, null);
}
// With the pool exhausted and the backing queue's poll stubbed to be
// interrupted, acquire must surface a RuntimeException.
@Test
public void resourcePoolBlocking() throws InterruptedException {
    mThrown.expect(RuntimeException.class);
    final int POOL_SIZE = 2;
    @SuppressWarnings("unchecked")
    ConcurrentLinkedQueue<Integer> queue = mock(ConcurrentLinkedQueue.class);
    TestResourcePool testPool = new TestResourcePool(POOL_SIZE, queue);
    when(queue.isEmpty()).thenReturn(true);
    when(queue.poll()).thenThrow(new InterruptedException());
    // one acquire beyond capacity forces the blocking path
    for (int i = 0; i < POOL_SIZE + 1; i++) {
        testPool.acquire();
    }
}
// Reassigns field ids in `schema` from `idSourceSchema`; delegates to the
// three-argument overload with its boolean flag fixed to true (flag semantics
// are defined at that overload).
public static Schema reassignIds(Schema schema, Schema idSourceSchema) {
    return reassignIds(schema, idSourceSchema, true);
}
// Reassigning ids against a source schema that has no identifier fields must
// still remap the identifier field ("a") to the source's field id.
@Test
public void testAssignIncreasingFreshIdNewIdentifier() {
    Schema schema =
        new Schema(
            Lists.newArrayList(
                required(10, "a", Types.IntegerType.get()),
                required(11, "A", Types.IntegerType.get())),
            Sets.newHashSet(10));
    Schema sourceSchema =
        new Schema(
            Lists.newArrayList(
                required(1, "a", Types.IntegerType.get()),
                required(2, "A", Types.IntegerType.get())));
    final Schema actualSchema = TypeUtil.reassignIds(schema, sourceSchema);
    assertThat(actualSchema.asStruct()).isEqualTo(sourceSchema.asStruct());
    assertThat(actualSchema.identifierFieldIds())
        .as("source schema missing identifier should not impact refreshing new identifier")
        .isEqualTo(Sets.newHashSet(sourceSchema.findField("a").fieldId()));
}
/**
 * Builds a NetworkPolicyPeer that matches pods by the given labels and
 * namespaces by the given selector.
 */
public static NetworkPolicyPeer createPeer(Map<String, String> podSelector, LabelSelector namespaceSelector) {
    return new NetworkPolicyPeerBuilder()
            .withNamespaceSelector(namespaceSelector)
            .withNewPodSelector()
                .withMatchLabels(podSelector)
            .endPodSelector()
            .build();
}
// The single-argument createPeer overload leaves the namespace selector null
// and passes the labels through to the pod selector.
@Test
public void testCreatePeerWithEmptyLabels() {
    NetworkPolicyPeer peer = NetworkPolicyUtils.createPeer(Map.of());
    assertThat(peer.getNamespaceSelector(), is(nullValue()));
    assertThat(peer.getPodSelector().getMatchLabels(), is(Map.of()));
}
/**
 * Recursively deletes the given etcd path: the key itself and every key under
 * it (prefix delete), waiting up to 10 seconds for the operation to complete.
 *
 * @param path key prefix to delete
 * @throws ShenyuException when the delete fails or times out
 */
public void deleteEtcdPathRecursive(final String path) {
    // build the key bytes once and reuse them for both the target key and the prefix option
    final ByteSequence key = ByteSequence.from(path, StandardCharsets.UTF_8);
    DeleteOption option = DeleteOption.newBuilder()
            .withPrefix(key)
            .build();
    try {
        client.getKVClient().delete(key, option).get(10, TimeUnit.SECONDS);
    } catch (Exception e) {
        LOG.error("delete node of recursive error.", e);
        // NOTE(review): the original exception cause is dropped here; if
        // ShenyuException has a (Throwable) constructor, prefer passing `e`.
        throw new ShenyuException(e.getMessage());
    }
}
// deleteEtcdPathRecursive must issue exactly one KV delete call.
@Test
public void deleteEtcdPathRecursive() {
    when(client.getKVClient().delete(any(ByteSequence.class), any(DeleteOption.class))).thenReturn(mock(CompletableFuture.class));
    etcdClient.deleteEtcdPathRecursive(TEST_KEY);
    verify(client.getKVClient(), times(1)).delete(any(ByteSequence.class), any(DeleteOption.class));
}
// Renders a comment as HTML: escapes the text and replaces each regex match
// with a dynamically built link. No regex/link configured: text is only
// escaped. Uncompilable regex: the raw text is returned unchanged.
@Override
public String render(String text) {
    if (StringUtils.isBlank(text)) {
        return "";
    }
    if (regex.isEmpty() || link.isEmpty()) {
        Comment comment = new Comment();
        comment.escapeAndAdd(text);
        return comment.render();
    }
    try {
        Matcher matcher = Pattern.compile(regex).matcher(text);
        int start = 0;
        Comment comment = new Comment();
        while (hasMatch(matcher)) {
            // escape the stretch before the match, then emit the match as a link
            comment.escapeAndAdd(text.substring(start, matcher.start()));
            comment.add(dynamicLink(matcher));
            start = matcher.end();
        }
        comment.escapeAndAdd(text.substring(start));
        return comment.render();
    } catch (PatternSyntaxException e) {
        LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
    }
    // NOTE(review): returned unescaped when the regex fails to compile — confirm intended
    return text;
}
// When the first capture group does not participate in the match, the next
// group that did match must supply the ${ID} substitution.
@Test
public void shouldRenderStringWithSpecifiedRegexAndLinkIfHasGroupsAndOtherThanFirstMaterializes() throws Exception {
    String link = "http://mingle05/projects/cce/cards/${ID}";
    String regex = "evo-(\\d+)|evo-(ab)";
    trackingTool = new DefaultCommentRenderer(link, regex);

    String result = trackingTool.render("evo-abc: checkin message");
    assertThat(result, is("<a href=\"" + "http://mingle05/projects/cce/cards/ab\" target=\"story_tracker\">evo-ab</a>c: checkin message"));
}
// Six-argument convenience overload: forwards to the seven-argument variant
// with the trailing boolean flag set to false (see that overload for the
// flag's meaning).
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
// A non-forwarded field reference with invalid nesting ("f0.f4") must be
// rejected with InvalidSemanticAnnotationException.
@Test
void testNonForwardedInvalidNesting() {
    String[] nonForwardedFields = {"f0.f4"};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    assertThatThrownBy(
            () -> SemanticPropUtil.getSemanticPropsSingleFromString(
                    sp, null, nonForwardedFields, null, nestedTupleType, nestedTupleType))
        .isInstanceOf(InvalidSemanticAnnotationException.class);
}
// Runs all Kafka spec sanity checks and collects any warning Conditions.
// Common checks always run; KRaft and ZooKeeper-based clusters then each get
// their own additional set of checks.
List<Condition> run(boolean useKRaft) {
    List<Condition> warnings = new ArrayList<>();

    checkKafkaReplicationConfig(warnings);
    checkKafkaBrokersStorage(warnings);

    if (useKRaft) {
        // Additional checks done for KRaft clusters
        checkKRaftControllerStorage(warnings);
        checkKRaftControllerCount(warnings);
        checkKafkaMetadataVersion(warnings);
        checkInterBrokerProtocolVersionInKRaft(warnings);
        checkLogMessageFormatVersionInKRaft(warnings);
    } else {
        // Additional checks done for ZooKeeper-based clusters
        checkKafkaLogMessageFormatVersion(warnings);
        checkKafkaInterBrokerProtocolVersion(warnings);
        checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
    }

    return warnings;
}
// Well-formed KRaft deployments — dedicated controllers + brokers, or mixed
// nodes — must produce no warnings.
@Test
public void checkEmptyWarnings() {
    KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
    assertThat(checker.run(true), empty());

    checker = generateChecker(KAFKA, List.of(MIXED), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
    assertThat(checker.run(true), empty());
}
/**
 * Wraps a byte array in an unpooled heap buffer that shares the array's
 * memory (no copy); an empty array yields the shared empty buffer.
 */
public static ByteBuf wrappedBuffer(byte[] array) {
    return array.length == 0
            ? EMPTY_BUFFER
            : new UnpooledHeapByteBuf(ALLOC, array, array.length);
}
// ByteBufUtil.hashCode over wrapped arrays must match precomputed reference values.
@Test
public void testHashCode() {
    Map<byte[], Integer> map = new LinkedHashMap<byte[], Integer>();
    map.put(EMPTY_BYTES, 1);
    map.put(new byte[] { 1 }, 32);
    map.put(new byte[] { 2 }, 33);
    map.put(new byte[] { 0, 1 }, 962);
    map.put(new byte[] { 1, 2 }, 994);
    map.put(new byte[] { 0, 1, 2, 3, 4, 5 }, 63504931);
    map.put(new byte[] { 6, 7, 8, 9, 0, 1 }, (int) 97180294697L);
    map.put(new byte[] { -1, -1, -1, (byte) 0xE1 }, 1);

    for (Entry<byte[], Integer> e: map.entrySet()) {
        ByteBuf buffer = wrappedBuffer(e.getKey());
        assertEquals(
            e.getValue().intValue(),
            ByteBufUtil.hashCode(buffer));
        buffer.release();
    }
}
// Appends the given configuration properties to this collection (delegates to addAll).
public void addConfigurations(List<ConfigurationProperty> props) {
    this.addAll(props);
}
// A property added via addConfigurations must be contained in the profile.
@Test
public void addConfigurations_shouldAddConfigurationsWithValue() throws Exception {
    ConfigurationProperty property = new ConfigurationProperty(new ConfigurationKey("username"), new ConfigurationValue("some_name"));

    ElasticProfile profile = new ElasticProfile("id", "prod-cluster");
    profile.addConfigurations(List.of(property));

    assertThat(profile.size(), is(1));
    assertThat(profile, contains(new ConfigurationProperty(new ConfigurationKey("username"), new ConfigurationValue("some_name"))));
}
// Inserts or replaces the mapping using the cache's configured expiry;
// delegates to the internal variant with onlyIfAbsent disabled.
@Override
public @Nullable V put(K key, V value) {
    return put(key, value, expiry(), /* onlyIfAbsent */ false);
}
// Exercises put through the broken-equality harness (keys whose
// equals/hashCode change after insertion); expected detection/logging
// behavior is defined by testForBrokenEquality.
@CheckMaxLogLevel(ERROR)
@Test(dataProvider = "caches")
@CacheSpec(population = Population.EMPTY, keys = ReferenceType.STRONG)
public void brokenEquality_put(BoundedLocalCache<MutableInt, Int> cache, CacheContext context) {
    testForBrokenEquality(cache, context, key -> {
        var value = cache.put(key, context.absentValue());
        assertThat(value).isEqualTo(context.absentValue());
    });
}
/**
 * Extracts a (possibly nested) field from a Kafka Connect record value using
 * a dot-separated path. Struct and Map values are supported; anything else
 * is rejected.
 *
 * @throws UnsupportedOperationException for unsupported value types
 */
@SuppressWarnings("unchecked")
static Object extractFromRecordValue(Object recordValue, String fieldName) {
    final List<String> fieldPath = Splitter.on('.').splitToList(fieldName);
    if (recordValue instanceof Struct) {
        return valueFromStruct((Struct) recordValue, fieldPath);
    }
    if (recordValue instanceof Map) {
        return valueFromMap((Map<String, ?>) recordValue, fieldPath);
    }
    throw new UnsupportedOperationException(
        "Cannot extract value from type: " + recordValue.getClass().getName());
}
// Empty or unknown field paths on a Struct value must extract to null rather
// than throwing.
@Test
public void testExtractFromRecordValueStructNull() {
    Schema valSchema = SchemaBuilder.struct().field("key", Schema.INT64_SCHEMA).build();
    Struct val = new Struct(valSchema).put("key", 123L);

    Object result = RecordUtils.extractFromRecordValue(val, "");
    assertThat(result).isNull();
    result = RecordUtils.extractFromRecordValue(val, "xkey");
    assertThat(result).isNull();
}
// Tries to acquire `acquireCount` concurrent-flow tokens for the given
// cluster flow rule. Uses a lock-free fast-fail check followed by a
// re-check inside a per-flowId lock so the count update is atomic; granted
// tokens are cached so they can be released or expired later.
public static TokenResult acquireConcurrentToken(/*@Valid*/ String clientAddress, FlowRule rule, int acquireCount) {
    long flowId = rule.getClusterConfig().getFlowId();
    AtomicInteger nowCalls = CurrentConcurrencyManager.get(flowId);
    if (nowCalls == null) {
        RecordLog.warn("[ConcurrentClusterFlowChecker] Fail to get nowCalls by flowId<{}>", flowId);
        return new TokenResult(TokenResultStatus.FAIL);
    }

    // check before enter the lock to improve the efficiency
    if (nowCalls.get() + acquireCount > calcGlobalThreshold(rule)) {
        ClusterServerStatLogUtil.log("concurrent|block|" + flowId, acquireCount);
        return new TokenResult(TokenResultStatus.BLOCKED);
    }

    // ensure the atomicity of operations
    // lock different nowCalls to improve the efficiency
    synchronized (nowCalls) {
        // check again whether the request can pass.
        if (nowCalls.get() + acquireCount > calcGlobalThreshold(rule)) {
            ClusterServerStatLogUtil.log("concurrent|block|" + flowId, acquireCount);
            return new TokenResult(TokenResultStatus.BLOCKED);
        } else {
            nowCalls.getAndAdd(acquireCount);
        }
    }
    ClusterServerStatLogUtil.log("concurrent|pass|" + flowId, acquireCount);

    // record the granted token so it can be released or expired later
    TokenCacheNode node = TokenCacheNode.generateTokenCacheNode(rule, acquireCount, clientAddress);
    TokenCacheNodeManager.putTokenCacheNode(node.getTokenId(), node);
    TokenResult tokenResult = new TokenResult(TokenResultStatus.OK);
    tokenResult.setTokenId(node.getTokenId());
    return tokenResult;
}
@Test
public void testReleaseExpiredToken() throws InterruptedException {
    ConnectionManager.addConnection("test", "127.0.0.1");
    final FlowRule rule = ClusterFlowRuleManager.getFlowRuleById(179L);

    // Acquire ten concurrent tokens without releasing any of them explicitly.
    for (int i = 0; i < 10; i++) {
        ConcurrentClusterFlowChecker.acquireConcurrentToken("127.0.0.1", rule, 1);
    }

    // Wait long enough for the background expiry task to reclaim all tokens.
    Thread.sleep(3000);

    final boolean allReleased =
            CurrentConcurrencyManager.get(rule.getClusterConfig().getFlowId()).get() == 0
                    && TokenCacheNodeManager.getSize() == 0;
    Assert.assertTrue("fail to acquire and release token", allReleased);
}
/** Merges two null-counts by summing them; merging is commutative. */
public long mergeNumNulls(long oldValue, long newValue) {
    final long combined = newValue + oldValue;
    return combined;
}
@Test
public void testMergeNumNulls() {
    // Merging null-counts is commutative: either operand order yields the sum.
    assertEquals(4, MERGER.mergeNumNulls(1, 3));
    assertEquals(4, MERGER.mergeNumNulls(3, 1));
}
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
    // A null payload deserializes to a null list, not a list containing null.
    if (bytes == null) {
        return null;
    }
    // Delegate to the inner deserializer and wrap the result as a one-element list.
    final Object value = inner.deserialize(topic, bytes);
    return Collections.singletonList(value);
}
@Test public void shouldDeserializeNewStyleNulls() { // When: final List<?> result = deserializer.deserialize(TOPIC, HEADERS, (byte[]) null); // Then: assertThat(result, is(nullValue())); }
/**
 * Enriches a cluster entry-point {@link OutOfMemoryError} (if {@code root} is one) with
 * JobManager-specific guidance messages for metaspace, direct-memory, and heap OOMs.
 *
 * @param root the throwable to enrich; may be {@code null}
 */
public static void tryEnrichClusterEntryPointError(@Nullable Throwable root) {
    tryEnrichOutOfMemoryError( root, JM_METASPACE_OOM_ERROR_MESSAGE, JM_DIRECT_OOM_ERROR_MESSAGE, JM_HEAP_SPACE_OOM_ERROR_MESSAGE);
}
@Test
public void testDirectMemoryOOMHandling() {
    // A direct-buffer OOM should have its message replaced with the
    // JobManager direct-memory guidance message.
    final OutOfMemoryError directOom = new OutOfMemoryError("Direct buffer memory");
    ClusterEntryPointExceptionUtils.tryEnrichClusterEntryPointError(directOom);
    assertThat(
            directOom.getMessage(),
            is(ClusterEntryPointExceptionUtils.JM_DIRECT_OOM_ERROR_MESSAGE));
}
/**
 * Runs one authentication step against the inbound message.
 *
 * <p>On a finished, successful authentication the connection session is populated with the
 * grantee, current database, and a process id. On any exception an error packet is written
 * back and the channel is closed. The message buffer is always released.
 *
 * @param context netty channel handler context
 * @param message inbound authentication payload (released in the finally block)
 * @return {@code true} only when authentication completed successfully
 */
private boolean authenticate(final ChannelHandlerContext context, final ByteBuf message) {
    try {
        AuthenticationResult authResult = databaseProtocolFrontendEngine.getAuthenticationEngine().authenticate(context,
                databaseProtocolFrontendEngine.getCodecEngine().createPacketPayload(message, context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get()));
        if (authResult.isFinished()) {
            // Authentication complete: bind identity and register the connection process.
            connectionSession.setGrantee(new Grantee(authResult.getUsername(), authResult.getHostname()));
            connectionSession.setCurrentDatabaseName(authResult.getDatabase());
            connectionSession.setProcessId(processEngine.connect(connectionSession.getUsedDatabaseName(), connectionSession.getConnectionContext().getGrantee()));
        }
        return authResult.isFinished();
        // CHECKSTYLE:OFF
    } catch (final Exception ex) {
        // CHECKSTYLE:ON
        // Expected (client-caused) failures are logged at debug; everything else at error.
        if (ExpectedExceptions.isExpected(ex.getClass())) {
            log.debug("Exception occur: ", ex);
        } else {
            log.error("Exception occur: ", ex);
        }
        // Report the failure to the client and terminate the connection.
        context.writeAndFlush(databaseProtocolFrontendEngine.getCommandExecuteEngine().getErrorPacket(ex));
        context.close();
    } finally {
        // Always release the inbound buffer to avoid a netty buffer leak.
        message.release();
    }
    return false;
}
@Test
void assertChannelReadNotAuthenticatedAndExceptionOccur() throws Exception {
    channel.register();
    // Make authentication blow up with a known exception.
    final RuntimeException authFailure = new RuntimeException("assertChannelReadNotAuthenticatedAndExceptionOccur");
    doThrow(authFailure).when(authenticationEngine).authenticate(any(ChannelHandlerContext.class), any(PacketPayload.class));
    // The frontend engine should translate that exception into an error packet.
    final DatabasePacket errorPacket = mock(DatabasePacket.class);
    when(frontendEngine.getCommandExecuteEngine().getErrorPacket(authFailure)).thenReturn(errorPacket);

    channel.writeInbound(Unpooled.EMPTY_BUFFER);

    // The error packet must have been flushed back to the client.
    assertThat(channel.readOutbound(), is(errorPacket));
}
/**
 * Builds the thread-level commit sensor, tracking invocation rate, total count,
 * and average/maximum latency of commit calls at INFO recording level.
 */
public static Sensor commitSensor(final String threadId, final StreamsMetricsImpl streamsMetrics) {
    final Sensor.RecordingLevel recordingLevel = Sensor.RecordingLevel.INFO;
    return invocationRateAndCountAndAvgAndMaxLatencySensor(
            threadId,
            COMMIT,
            COMMIT_RATE_DESCRIPTION,
            COMMIT_TOTAL_DESCRIPTION,
            COMMIT_AVG_LATENCY_DESCRIPTION,
            COMMIT_MAX_LATENCY_DESCRIPTION,
            recordingLevel,
            streamsMetrics);
}
@Test
public void shouldGetCommitSensor() {
    // Expected metric names and descriptions for the commit sensor.
    final String operation = "commit";
    final String operationLatency = operation + StreamsMetricsImpl.LATENCY_SUFFIX;
    final String totalDescription = "The total number of calls to commit";
    final String rateDescription = "The average per-second number of calls to commit";
    final String avgLatencyDescription = "The average commit latency";
    final String maxLatencyDescription = "The maximum commit latency";
    when(streamsMetrics.threadLevelSensor(THREAD_ID, operation, RecordingLevel.INFO)).thenReturn(expectedSensor);
    when(streamsMetrics.threadLevelTagMap(THREAD_ID)).thenReturn(tagMap);
    // Mock the static helpers so we can verify they were invoked with the right arguments.
    try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
        final Sensor sensor = ThreadMetrics.commitSensor(THREAD_ID, streamsMetrics);
        // Rate/count metrics must be registered on the sensor.
        streamsMetricsStaticMock.verify(
            () -> StreamsMetricsImpl.addInvocationRateAndCountToSensor(
                expectedSensor,
                THREAD_LEVEL_GROUP,
                tagMap,
                operation,
                rateDescription,
                totalDescription
            )
        );
        // Avg/max latency metrics must be registered as well.
        streamsMetricsStaticMock.verify(
            () -> StreamsMetricsImpl.addAvgAndMaxToSensor(
                expectedSensor,
                THREAD_LEVEL_GROUP,
                tagMap,
                operationLatency,
                avgLatencyDescription,
                maxLatencyDescription
            )
        );
        // The configured sensor itself is returned to the caller.
        assertThat(sensor, is(expectedSensor));
    }
}
/**
 * Initializes operator state: the window serializer, internal timer service, trigger
 * context, keyed list states for accumulate/retract rows, and the window assigner.
 *
 * <p>NOTE(review): initialization order matters — the serializer must exist before the
 * timer service and keyed states are created from it.
 */
@Override
public void open() throws Exception {
    super.open();
    windowSerializer = windowAssigner.getWindowSerializer(new ExecutionConfig());
    // Timers are namespaced by window; this operator is its own Triggerable target.
    internalTimerService = getInternalTimerService("window-timers", windowSerializer, this);
    triggerContext = new TriggerContext();
    triggerContext.open();
    // Keyed, per-window list states: one for accumulating input rows, one for retractions.
    StateDescriptor<ListState<RowData>, List<RowData>> windowStateDescriptor = new ListStateDescriptor<>("window-input", new RowDataSerializer(inputType));
    StateDescriptor<ListState<RowData>, List<RowData>> dataRetractStateDescriptor = new ListStateDescriptor<>("data-retract", new RowDataSerializer(inputType));
    this.windowAccumulateData = (InternalListState<K, W, RowData>) getOrCreateKeyedState(windowSerializer, windowStateDescriptor);
    this.windowRetractData = (InternalListState<K, W, RowData>) getOrCreateKeyedState(windowSerializer, dataRetractStateDescriptor);
    inputKeyAndWindow = new LinkedList<>();
    // Reusable row holding named window properties (start/end/rowtime, etc.).
    windowProperty = new GenericRowData(namedProperties.length);
    windowAggResult = new JoinedRowData();
    WindowContext windowContext = new WindowContext();
    windowAssigner.open(windowContext);
}
@Test
void testFinishBundleTriggeredOnCheckpoint() throws Exception {
    Configuration conf = new Configuration();
    // Bundle size larger than the number of elements, so only the checkpoint
    // (prepareSnapshotPreBarrier) can flush the bundle.
    conf.set(PythonOptions.MAX_BUNDLE_SIZE, 10);
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = getTestHarness(conf);
    long initialTime = 0L;
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c2", 0L, 0L), initialTime + 1));
    testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c4", 1L, 6000L), initialTime + 2));
    testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c6", 2L, 10000L), initialTime + 3));
    testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c2", "c8", 3L, 0L), initialTime + 4));
    testHarness.processWatermark(new Watermark(10000L));
    // checkpoint trigger finishBundle
    testHarness.prepareSnapshotPreBarrier(0L);
    // Windows fired up to watermark 10000 are emitted after the bundle flush.
    expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 0L, TimestampData.fromEpochMillis(-5000L), TimestampData.fromEpochMillis(5000L))));
    expectedOutput.add( new StreamRecord<>( newRow( true, "c2", 3L, TimestampData.fromEpochMillis(-5000L), TimestampData.fromEpochMillis(5000L))));
    expectedOutput.add( new StreamRecord<>( newRow( true, "c2", 3L, TimestampData.fromEpochMillis(0L), TimestampData.fromEpochMillis(10000L))));
    expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 0L, TimestampData.fromEpochMillis(0L), TimestampData.fromEpochMillis(10000L))));
    expectedOutput.add(new Watermark(10000L));
    assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
    // Advancing the watermark and closing fires the remaining windows.
    testHarness.processWatermark(20000L);
    testHarness.close();
    expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 1L, TimestampData.fromEpochMillis(5000L), TimestampData.fromEpochMillis(15000L))));
    expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 2L, TimestampData.fromEpochMillis(10000L), TimestampData.fromEpochMillis(20000L))));
    expectedOutput.add(new Watermark(20000L));
    assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
@Override
public RelativeRange apply(final Period period) {
    if (period == null) {
        return null;
    }
    // Approximate calendar units with fixed lengths (365 days/year, 30 days/month)
    // so the period can be expressed as a standard number of seconds.
    final Period normalized = period.withYears(0).withMonths(0)
            .plusDays(period.getYears() * 365)
            .plusDays(period.getMonths() * 30);
    final int seconds = normalized.toStandardSeconds().getSeconds();
    return RelativeRange.Builder.builder().from(seconds).build();
}
@Test
void testYearsPeriodConversion() {
    // Three years are approximated as 3 * 365 days, expressed in seconds.
    final int expectedSeconds = 3 * 365 * 24 * 60 * 60;
    final RelativeRange result = converter.apply(Period.years(3));
    verifyResult(result, expectedSeconds);
}
@Subscribe
public void inputCreated(InputCreated inputCreatedEvent) {
    final String inputId = inputCreatedEvent.id();
    LOG.debug("Input created: {}", inputId);

    final Input input;
    try {
        input = inputService.find(inputId);
    } catch (NotFoundException e) {
        LOG.warn("Received InputCreated event but could not find input {}", inputId, e);
        return;
    }

    // Stop and deregister any instance of this input that is already running.
    final IOState<MessageInput> runningState = inputRegistry.getInputState(inputId);
    if (runningState != null) {
        inputRegistry.remove(runningState);
    }

    // (Re)start the input when it is global or assigned to this node.
    final boolean belongsToThisNode =
            input.isGlobal() || this.nodeId.getNodeId().equals(input.getNodeId());
    if (belongsToThisNode) {
        startInput(input);
    }
}
@Test
public void inputCreatedStopsInputIfItIsRunning() throws Exception {
    final String inputId = "input-id";
    final Input input = mock(Input.class);
    @SuppressWarnings("unchecked")
    final IOState<MessageInput> runningState = mock(IOState.class);
    when(inputService.find(inputId)).thenReturn(input);
    // Simulate an input that is already running when the event arrives.
    when(inputRegistry.getInputState(inputId)).thenReturn(runningState);

    listener.inputCreated(InputCreated.create(inputId));

    // The running instance must be removed before any restart.
    verify(inputRegistry, times(1)).remove(runningState);
}
@Bean("ReadCache")
public ReadCache provideReader(AnalysisCacheEnabled analysisCacheEnabled, AnalysisCacheMemoryStorage storage) {
    // When the analysis cache is disabled, hand out a no-op implementation.
    if (!analysisCacheEnabled.isEnabled()) {
        return new NoOpReadCache();
    }
    // Load the cached data into memory before exposing it to readers.
    storage.load();
    return new ReadCacheImpl(storage);
}
@Test
public void provide_real_reader_cache_when_enable() {
    when(analysisCacheEnabled.isEnabled()).thenReturn(true);

    final var readCache = cacheProvider.provideReader(analysisCacheEnabled, storage);

    // The storage must be loaded, and a real (non-no-op) cache returned.
    verify(storage).load();
    assertThat(readCache).isInstanceOf(ReadCacheImpl.class);
}
/**
 * Creates a review for the group identified by the request code, after validating
 * that the submitted answers match the group's template.
 */
@Transactional
public long createReview(CreateReviewRequest request) {
    // Resolve the review group from the request code, then its backing template.
    ReviewGroup reviewGroup = validateReviewGroupByRequestCode(request.reviewRequestCode());
    long templateId = reviewGroup.getTemplateId();
    Template template = templateRepository.findById(templateId)
            .orElseThrow(() -> new TemplateNotFoundByReviewGroupException(reviewGroup.getId(), templateId));
    // Reject answers to questions outside the template, and missing required answers.
    validateSubmittedQuestionsContainedInTemplate(templateId, request);
    validateOnlyRequiredQuestionsSubmitted(template, request);
    return saveReview(request, reviewGroup);
}
@Test
void 필수_질문에_모두_응답하는_경우_예외가_발생하지_않는다() {
    // Save the review group
    String reviewRequestCode = "1234";
    reviewGroupRepository.save(new ReviewGroup("리뷰어", "프로젝트", reviewRequestCode, "12341234"));
    // Save an always-required choice question and its section
    Question alwaysRequiredQuestion = questionRepository.save( new Question(true, QuestionType.CHECKBOX, "질문", "가이드라인", 1) );
    OptionGroup alwaysRequiredOptionGroup = optionGroupRepository.save( new OptionGroup(alwaysRequiredQuestion.getId(), 1, 2) );
    OptionItem alwaysRequiredOptionItem1 = optionItemRepository.save( new OptionItem("선택지", alwaysRequiredOptionGroup.getId(), 1, OptionType.KEYWORD) );
    OptionItem alwaysRequiredOptionItem2 = optionItemRepository.save( new OptionItem("선택지", alwaysRequiredOptionGroup.getId(), 2, OptionType.KEYWORD) );
    Section alwaysRequiredSection = sectionRepository.save( new Section(VisibleType.ALWAYS, List.of(alwaysRequiredQuestion.getId()), null, "섹션명", "말머리", 1) );
    // Save a non-required text question
    Question notRequiredQuestion = questionRepository.save( new Question(false, QuestionType.TEXT, "질문", "가이드라인", 1) );
    Section notRequiredSection = sectionRepository.save( new Section(VisibleType.ALWAYS, List.of(notRequiredQuestion.getId()), null, "섹션명", "말머리", 1) );
    // Save conditional section 1, whose required-ness depends on the selected optionItem
    Question conditionalTextQuestion1 = questionRepository.save( new Question(true, QuestionType.TEXT, "질문", "가이드라인", 1) );
    Question conditionalCheckQuestion = questionRepository.save( new Question(true, QuestionType.CHECKBOX, "질문", "가이드라인", 1) );
    OptionGroup conditionalOptionGroup = optionGroupRepository.save( new OptionGroup(conditionalCheckQuestion.getId(), 1, 2) );
    OptionItem conditionalOptionItem = optionItemRepository.save( new OptionItem("선택지", conditionalOptionGroup.getId(), 1, OptionType.KEYWORD) );
    Section conditionalSection1 = sectionRepository.save( new Section(VisibleType.CONDITIONAL, List.of(conditionalTextQuestion1.getId(), conditionalCheckQuestion.getId()), alwaysRequiredOptionItem1.getId(), "섹션명", "말머리", 1) );
    // Save conditional section 2, whose required-ness depends on the selected optionItem
    Question conditionalQuestion2 = questionRepository.save( new Question(true, QuestionType.TEXT, "질문", "가이드라인", 1) );
    Section conditionalSection2 = sectionRepository.save( new Section(VisibleType.CONDITIONAL, List.of(conditionalQuestion2.getId()), alwaysRequiredOptionItem2.getId(), "섹션명", "말머리", 1) );
    // Save the template
    templateRepository.save(new Template( List.of(alwaysRequiredSection.getId(), conditionalSection1.getId(), conditionalSection2.getId(), notRequiredSection.getId()) ));
    // Create an answer for each question
    CreateReviewAnswerRequest alwaysRequiredAnswer = new CreateReviewAnswerRequest( alwaysRequiredQuestion.getId(), List.of(alwaysRequiredOptionItem1.getId()), null);
    CreateReviewAnswerRequest conditionalTextAnswer1 = new CreateReviewAnswerRequest( conditionalTextQuestion1.getId(), null, "답변".repeat(30));
    CreateReviewAnswerRequest conditionalCheckAnswer1 = new CreateReviewAnswerRequest( conditionalCheckQuestion.getId(), List.of(conditionalOptionItem.getId()), null);
    CreateReviewAnswerRequest conditionalTextAnswer2 = new CreateReviewAnswerRequest( conditionalQuestion2.getId(), null, "답변".repeat(30));
    // Build review-creation DTOs for the different scenarios
    CreateReviewRequest properRequest = new CreateReviewRequest( reviewRequestCode, List.of(alwaysRequiredAnswer, conditionalTextAnswer1, conditionalCheckAnswer1));
    CreateReviewRequest selectedOptionIdQuestionMissingRequest1 = new CreateReviewRequest( reviewRequestCode, List.of(alwaysRequiredAnswer));
    CreateReviewRequest selectedOptionIdQuestionMissingRequest2 = new CreateReviewRequest( reviewRequestCode, List.of(alwaysRequiredAnswer, conditionalTextAnswer1));
    CreateReviewRequest selectedOptionIdQuestionMissingRequest3 = new CreateReviewRequest( reviewRequestCode, List.of(alwaysRequiredAnswer, conditionalCheckAnswer1));
    CreateReviewRequest unnecessaryQuestionIncludedRequest = new CreateReviewRequest( reviewRequestCode, List.of(alwaysRequiredAnswer, conditionalTextAnswer1, conditionalCheckAnswer1, conditionalTextAnswer2));
    // when, then
    assertThatCode(() -> createReviewService.createReview(properRequest)) .doesNotThrowAnyException();
    assertThatCode(() -> createReviewService.createReview(selectedOptionIdQuestionMissingRequest1)) .isInstanceOf(MissingRequiredQuestionException.class);
    assertThatCode(() -> createReviewService.createReview(selectedOptionIdQuestionMissingRequest2)) .isInstanceOf(MissingRequiredQuestionException.class);
    assertThatCode(() -> createReviewService.createReview(selectedOptionIdQuestionMissingRequest3)) .isInstanceOf(MissingRequiredQuestionException.class);
    assertThatCode(() -> createReviewService.createReview(unnecessaryQuestionIncludedRequest)) .isInstanceOf(UnnecessaryQuestionIncludedException.class);
}
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds) {
    List<Integer> glyphIds = originalGlyphIds;
    // Apply each supported GSUB feature, in the prescribed order.
    for (String feature : FEATURES_IN_ORDER) {
        if (!gsubData.isFeatureSupported(feature)) {
            LOG.debug("the feature {} was not found", feature);
            continue;
        }
        LOG.debug("applying the feature {}", feature);
        final ScriptFeature scriptFeature = gsubData.getFeature(feature);
        glyphIds = applyGsubFeature(scriptFeature, glyphIds);
    }
    // Reposition glyphs and return an immutable view of the final sequence.
    return Collections.unmodifiableList(repositionGlyphs(glyphIds));
}
@Test void testApplyTransforms_ra_phala() { // given List<Integer> glyphsAfterGsub = Arrays.asList(274, 82); // when List<Integer> result = gsubWorkerForBengali.applyTransforms(getGlyphIds("দ্রুত")); // then assertEquals(glyphsAfterGsub, result); }
public static Object convert(Class<?> expectedClass, Object originalObject) { if (originalObject == null) { return null; } Class<?> currentClass = originalObject.getClass(); if (expectedClass.isAssignableFrom(currentClass)) { return originalObject; } if (PrimitiveBoxedUtils.areSameWithBoxing(expectedClass, originalObject.getClass())) { // No cast/transformation originalObject return originalObject; } if (expectedClass == String.class) { return originalObject.toString(); } Object toReturn; String currentClassName = currentClass.getName(); switch (currentClassName) { case "java.lang.String": toReturn = convertFromString(expectedClass, (String) originalObject); break; case "int": case "java.lang.Integer": toReturn = convertFromInteger(expectedClass, (Integer) originalObject); break; case "double": case "java.lang.Double": toReturn = convertFromDouble(expectedClass, (Double) originalObject); break; case "float": case "java.lang.Float": toReturn = convertFromFloat(expectedClass, (Float) originalObject); break; default: throw new KiePMMLException(String.format(FAILED_CONVERSION, originalObject, expectedClass.getName())); } return toReturn; }
@Test
void convertConvertibleFromString() {
    // Every entry maps a source string to the value it should convert to.
    CONVERTIBLE_FROM_STRING.forEach((input, expected) -> {
        final Object converted = ConverterTypeUtil.convert(expected.getClass(), input);
        assertThat(converted).isEqualTo(expected);
    });
}
@Override
public void convertWeightsForChildQueues(FSQueue queue, CapacitySchedulerConfiguration csConfig) {
    final List<FSQueue> children = queue.getChildQueues();
    // Leaf queues (non-parent with no children) need no weight conversion.
    if (!(queue instanceof FSParentQueue) && children.isEmpty()) {
        return;
    }
    final QueuePath queuePath = new QueuePath(queue.getName());
    // The root queue's own weight is written out as well.
    if (queue.getName().equals(ROOT_QUEUE)) {
        csConfig.setNonLabeledQueueWeight(queuePath, queue.getWeight());
    }
    for (FSQueue child : children) {
        csConfig.setNonLabeledQueueWeight(new QueuePath(child.getName()), child.getWeight());
    }
    csConfig.setAutoQueueCreationV2Enabled(queuePath, true);
}
@Test
public void testSingleWeightConversion() {
    final FSQueue root = createFSQueues(1);

    converter.convertWeightsForChildQueues(root, csConfig);

    // Root and its single child both keep a weight of 1.0.
    assertEquals("root weight", 1.0f, csConfig.getNonLabeledQueueWeight(ROOT), 0.0f);
    assertEquals("root.a weight", 1.0f, csConfig.getNonLabeledQueueWeight(ROOT_A), 0.0f);
    assertEquals("Number of properties", 22, csConfig.getPropsWithPrefix(PREFIX).size());
}
@VisibleForTesting
public static <ConfigT> ConfigT payloadToConfig(
        ExternalConfigurationPayload payload, Class<ConfigT> configurationClass) {
    try {
        // Preferred path: construct the config through its registered schema.
        return payloadToConfigSchema(payload, configurationClass);
    } catch (NoSuchSchemaException schemaException) {
        LOG.warn(
            "Configuration class '{}' has no schema registered. Attempting to construct with setter"
                + " approach.",
            configurationClass.getName());
        // Fallback: instantiate reflectively and populate via setters.
        try {
            return payloadToConfigSetters(payload, configurationClass);
        } catch (ReflectiveOperationException e) {
            throw new IllegalArgumentException(
                String.format(
                    "Failed to construct instance of configuration class '%s'",
                    configurationClass.getName()),
                e);
        }
    }
}
@Test
public void testCompoundCodersForExternalConfiguration_setters() throws Exception {
    // Encode a row with compound field types (iterable, map, map-of-array) into the
    // external configuration payload format.
    ExternalTransforms.ExternalConfigurationPayload externalConfig =
        encodeRowIntoExternalConfigurationPayload(
            Row.withSchema(
                    Schema.of(
                        Field.nullable("config_key1", FieldType.INT64),
                        Field.nullable("config_key2", FieldType.iterable(FieldType.BYTES)),
                        Field.of("config_key3", FieldType.map(FieldType.STRING, FieldType.INT64)),
                        Field.of(
                            "config_key4",
                            FieldType.map(FieldType.STRING, FieldType.array(FieldType.INT64)))))
                .withFieldValue("config_key1", 1L)
                .withFieldValue("config_key2", BYTE_LIST)
                .withFieldValue("config_key3", BYTE_KV_LIST)
                .withFieldValue("config_key4", BYTE_KV_LIST_WITH_LIST_VALUE)
                .build());
    // Decode through the setter-based (no registered schema) path.
    TestConfigSetters config = ExpansionService.payloadToConfig(externalConfig, TestConfigSetters.class);
    assertThat(config.configKey1, Matchers.is(1L));
    assertThat(config.configKey2, contains(BYTE_LIST.toArray()));
    assertThat(config.configKey3, is(notNullValue()));
    // no-op for checker framework
    if (config.configKey3 != null) {
        // Every (key, value) pair of the source map must be present, in any order.
        assertThat(
            config.configKey3.entrySet(),
            containsInAnyOrder(
                BYTE_KV_LIST.entrySet().stream()
                    .map(
                        (entry) ->
                            allOf(
                                hasProperty("key", equalTo(entry.getKey())),
                                hasProperty("value", equalTo(entry.getValue()))))
                    .collect(Collectors.toList())));
    }
    assertThat(config.configKey4, is(notNullValue()));
    // no-op for checker framework
    if (config.configKey4 != null) {
        // Same pairwise check for the map whose values are lists.
        assertThat(
            config.configKey4.entrySet(),
            containsInAnyOrder(
                BYTE_KV_LIST_WITH_LIST_VALUE.entrySet().stream()
                    .map(
                        (entry) ->
                            allOf(
                                hasProperty("key", equalTo(entry.getKey())),
                                hasProperty("value", equalTo(entry.getValue()))))
                    .collect(Collectors.toList())));
    }
}
/**
 * Copies property values from {@code source} onto a new instance of {@code targetClass},
 * delegating to the three-argument overload with a {@code null} (default) converter.
 *
 * @param source      the bean to read values from
 * @param targetClass the class to instantiate and populate
 * @return the populated target instance
 */
public static <T> T copy(Object source, Class<T> targetClass) {
    return copy(source, targetClass, null);
}
@Test public void copyTest() { SampleBean bean = new SampleBean(); OtherSampleBean otherBean = new OtherSampleBean(); bean.setValue("Hello world"); bean.setValue2("123"); CglibUtil.copy(bean, otherBean); assertEquals("Hello world", otherBean.getValue()); // 无定义转换器,转换失败 assertEquals(0, otherBean.getValue2()); OtherSampleBean otherBean2 = CglibUtil.copy(bean, OtherSampleBean.class); assertEquals("Hello world", otherBean2.getValue()); // 无定义转换器,转换失败 assertEquals(0, otherBean.getValue2()); otherBean = new OtherSampleBean(); //自定义转换器 CglibUtil.copy(bean, otherBean, (value, target, context) -> Convert.convertQuietly(target, value)); assertEquals("Hello world", otherBean.getValue()); assertEquals(123, otherBean.getValue2()); }
/**
 * Validates that existing internal topics match the expected configurations,
 * retrying describe calls until all topics are checked or the retry deadline expires.
 *
 * @param topicConfigs expected configuration per internal topic name
 * @return accumulated validation result (missing topics and misconfigurations)
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;
    final ValidationResult validationResult = new ValidationResult();
    // Two independent work lists: partition-count checks (descriptions) and
    // cleanup-policy checks (configs); each topic is removed once validated.
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        // Re-issue describe requests only for topics not yet validated.
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet())
            );
            // Key futures by topic name instead of ConfigResource for uniform handling.
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }
        // Drain the outstanding futures; doValidateTopic removes completed entries.
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
                );
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
                );
            }
            // Give up once the overall retry deadline has passed.
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
            );
            // Back off briefly before polling the remaining futures again.
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }
        maybeSleep(
            Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
            deadline,
            "validated"
        );
    }
    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
@Test
public void shouldReportMisconfigurationsOfCleanupPolicyForRepartitionTopics() {
    final long retentionMs = 1000;
    // topic1 is correctly configured and must NOT be reported.
    setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
    // topic2: cleanup.policy=compact (invalid for repartition topics).
    final Map<String, String> repartitionTopicConfigCleanupPolicyCompact = repartitionTopicConfig();
    repartitionTopicConfigCleanupPolicyCompact.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
    setupTopicInMockAdminClient(topic2, repartitionTopicConfigCleanupPolicyCompact);
    // topic3: cleanup.policy=compact,delete (compact still invalid).
    final Map<String, String> repartitionTopicConfigCleanupPolicyCompactAndDelete = repartitionTopicConfig();
    repartitionTopicConfigCleanupPolicyCompactAndDelete.put(
        TopicConfig.CLEANUP_POLICY_CONFIG,
        TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE
    );
    setupTopicInMockAdminClient(topic3, repartitionTopicConfigCleanupPolicyCompactAndDelete);
    // topic4: finite retention.ms (must be -1 / infinite).
    final Map<String, String> repartitionTopicConfigWithFiniteRetentionMs = repartitionTopicConfig();
    repartitionTopicConfigWithFiniteRetentionMs.put(TopicConfig.RETENTION_MS_CONFIG, String.valueOf(retentionMs));
    setupTopicInMockAdminClient(topic4, repartitionTopicConfigWithFiniteRetentionMs);
    // topic5: retention.bytes set (must be unset).
    final Map<String, String> repartitionTopicConfigWithRetentionBytesSet = repartitionTopicConfig();
    repartitionTopicConfigWithRetentionBytesSet.put(TopicConfig.RETENTION_BYTES_CONFIG, "1024");
    setupTopicInMockAdminClient(topic5, repartitionTopicConfigWithRetentionBytesSet);
    final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1);
    final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 1);
    final InternalTopicConfig internalTopicConfig3 = setupRepartitionTopicConfig(topic3, 1);
    final InternalTopicConfig internalTopicConfig4 = setupRepartitionTopicConfig(topic4, 1);
    final InternalTopicConfig internalTopicConfig5 = setupRepartitionTopicConfig(topic5, 1);
    final ValidationResult validationResult = internalTopicManager.validate(mkMap(
        mkEntry(topic1, internalTopicConfig1),
        mkEntry(topic2, internalTopicConfig2),
        mkEntry(topic3, internalTopicConfig3),
        mkEntry(topic4, internalTopicConfig4),
        mkEntry(topic5, internalTopicConfig5)
    ));
    final Map<String, List<String>> misconfigurationsForTopics = validationResult.misconfigurationsForTopics();
    // All topics exist, and exactly topics 2-5 are reported misconfigured.
    assertThat(validationResult.missingTopics(), empty());
    assertThat(misconfigurationsForTopics.size(), is(4));
    assertThat(misconfigurationsForTopics, hasKey(topic2));
    assertThat(misconfigurationsForTopics.get(topic2).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic2).get(0),
        is("Cleanup policy (" + TopicConfig.CLEANUP_POLICY_CONFIG + ") of existing internal topic "
            + topic2 + " should not contain \"" + TopicConfig.CLEANUP_POLICY_COMPACT + "\".")
    );
    assertThat(misconfigurationsForTopics, hasKey(topic3));
    assertThat(misconfigurationsForTopics.get(topic3).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic3).get(0),
        is("Cleanup policy (" + TopicConfig.CLEANUP_POLICY_CONFIG + ") of existing internal topic "
            + topic3 + " should not contain \"" + TopicConfig.CLEANUP_POLICY_COMPACT + "\".")
    );
    assertThat(misconfigurationsForTopics, hasKey(topic4));
    assertThat(misconfigurationsForTopics.get(topic4).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic4).get(0),
        is("Retention time (" + TopicConfig.RETENTION_MS_CONFIG + ") of existing internal topic "
            + topic4 + " is " + retentionMs + " but should be -1.")
    );
    assertThat(misconfigurationsForTopics, hasKey(topic5));
    assertThat(misconfigurationsForTopics.get(topic5).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic5).get(0),
        is("Retention byte (" + TopicConfig.RETENTION_BYTES_CONFIG + ") of existing internal topic "
            + topic5 + " is set but it should be unset.")
    );
}
/**
 * Builds the shared {@link OkHttpClient}: configures timeouts, the custom trust
 * manager/SSL socket factory when available, and an HTTP proxy (with optional
 * user:password authentication parsed from the proxy URI's user-info part).
 */
@Override
public OkHttpClient get() {
    final OkHttpClient.Builder clientBuilder = new OkHttpClient.Builder()
        .retryOnConnectionFailure(true)
        .connectTimeout(connectTimeout.getQuantity(), connectTimeout.getUnit())
        .writeTimeout(writeTimeout.getQuantity(), writeTimeout.getUnit())
        .readTimeout(readTimeout.getQuantity(), readTimeout.getUnit());
    if (trustManagerAndSocketFactoryProvider != null) {
        // always set our own CA, might be overridden in later code
        clientBuilder.sslSocketFactory(trustManagerAndSocketFactoryProvider.getSslSocketFactory(), trustManagerAndSocketFactoryProvider.getTrustManager());
    }
    if (httpProxyUri != null) {
        clientBuilder.proxySelector(proxySelectorProvider.get());
        // Extract "user:password" proxy credentials from the URI, if present.
        if (!isNullOrEmpty(httpProxyUri.getUserInfo())) {
            final List<String> list = Splitter.on(":")
                .limit(2)
                .splitToList(httpProxyUri.getUserInfo());
            if (list.size() == 2) {
                clientBuilder.proxyAuthenticator(new ProxyAuthenticator(list.get(0), list.get(1)));
            }
        }
    }
    return clientBuilder.build();
}
@Test
public void testSuccessfulConnectionWithoutProxy() throws IOException, InterruptedException {
    server.enqueue(successfulMockResponse());
    final Request request = new Request.Builder().url(server.url("/")).get().build();

    // A client built without a proxy URI talks to the server directly.
    final Response response = client(null).newCall(request).execute();

    assertThat(response.isSuccessful()).isTrue();
    final ResponseBody responseBody = response.body();
    assertThat(responseBody).isNotNull();
    assertThat(responseBody.string()).isEqualTo("Test");

    // Exactly one request reached the server, and it was a plain GET of "/".
    assertThat(server.getRequestCount()).isEqualTo(1);
    final RecordedRequest recorded = server.takeRequest();
    assertThat(recorded.getMethod()).isEqualTo("GET");
    assertThat(recorded.getPath()).isEqualTo("/");
}
/**
 * Probabilistically decides whether to take a sample; the probability grows linearly
 * with time since the last sample, reaching 1.0 after a full sampling interval.
 */
@Override
public boolean shouldSample() {
    // This load might race with the store below, causing multiple threads to get a sample
    // since the new timestamp has not been written yet, but it is extremely unlikely and
    // the consequences are not severe since this is a probabilistic sampler that does not
    // provide hard lower or upper bounds.
    long lastSampledAt = lastSampledAtNanoTime.get(); // TODO getPlain? No transitive visibility requirements
    long now = nanoClock.nanoTimeNow();
    // Seconds elapsed since the last recorded sample.
    double secsSinceLastSample = (now - lastSampledAt) / 1_000_000_000.0;
    // As the time elapsed since last sample increases, so does the probability of a new sample
    // being selected.
    double sampleProb = Math.min(secsSinceLastSample * desiredSamplesPerSec, 1.0);
    if (randomSupplier.get().nextDouble() < sampleProb) {
        lastSampledAtNanoTime.set(now); // TODO setPlain? No transitive visibility requirements
        return true;
    } else {
        return false;
    }
}
@Test void samples_are_rate_limited_per_second() { var clock = MockUtils.mockedClockReturning(ms2ns(10_000), ms2ns(10_500), ms2ns(10_500), ms2ns(10_501)); var rng = MockUtils.mockedRandomReturning(0.1, 0.51, 0.49, 0.01); var sampler = new ProbabilisticSampleRate(clock, () -> rng, 1.0); // 1st invocation, 10 seconds (technically "infinity") since last sample. P = 1.0, sampled assertTrue(sampler.shouldSample()); // 2nd invocation, 0.5 seconds since last sample. rng = 0.51 >= P = 0.5, not sampled assertFalse(sampler.shouldSample()); // 3rd invocation, 0.5 seconds since last sample. rng = 0.49 < P = 0.5, sampled assertTrue(sampler.shouldSample()); // 4th invocation, 0.001 seconds since last sample. rng = 0.01 >= P = 0.001, not sampled assertFalse(sampler.shouldSample()); }
/**
 * Groups instances by application name.
 *
 * <p>Idiom fix: the manual get-then-put-if-absent pattern is replaced with
 * {@link Map#computeIfAbsent}, which is shorter and avoids the double lookup.
 *
 * @param instances instances to group; each is added to the application named by its app name
 * @return map from application name to an {@code Application} holding its instances
 */
public static Map<String, Application> toApplicationMap(List<InstanceInfo> instances) {
    Map<String, Application> applicationMap = new HashMap<String, Application>();
    for (InstanceInfo instance : instances) {
        // Lazily create the Application on first sight of its name, then attach the instance.
        applicationMap
                .computeIfAbsent(instance.getAppName(), Application::new)
                .addInstance(instance);
    }
    return applicationMap;
}
@Test
public void testToApplicationMapIfNotNullReturnMapOfApplication() {
    final Application application = createSingleInstanceApp("foo", "foo", InstanceInfo.ActionType.ADDED);
    final List<InstanceInfo> instances = new ArrayList<>(Arrays.asList(application.getByInstanceId("foo")));

    // One instance maps to exactly one application entry.
    Assert.assertEquals(1, EurekaEntityFunctions.toApplicationMap(instances).size());
}
/**
 * Server-side copy of a file or folder on Dropbox. When the caller signals the
 * target already exists ({@code status.isExists()}), the target is deleted first
 * — presumably because copyV2 does not overwrite; confirm against the Dropbox API.
 * Folders are copied recursively by the API itself.
 */
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    try {
        if(status.isExists()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Delete file %s to be replaced with %s", target, file));
            }
            new DropboxDeleteFeature(session).delete(Collections.singletonMap(target, status), callback, new Delete.DisabledCallback());
        }
        // If the source path is a folder all its contents will be copied.
        final RelocationResult result = new DbxUserFilesRequests(session.getClient(file)).copyV2(containerService.getKey(file), containerService.getKey(target));
        // Report the full length to the listener in one shot (server-side copy, no streaming).
        listener.sent(status.getLength());
        // Decorate the target with attributes taken from the copy result's metadata.
        return target.withAttributes(new DropboxAttributesFinderFeature(session).toAttributes(result.getMetadata()));
    }
    catch(DbxException e) {
        throw new DropboxExceptionMappingService().map("Cannot copy {0}", e, file);
    }
}
@Test
public void testCopyToExistingFile() throws Exception {
    // Create a folder with a source file and a pre-existing target file,
    // then copy with exists(true) to exercise the delete-before-copy path.
    final Path folder = new DropboxDirectoryFeature(session).mkdir(
            new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path test = new DropboxTouchFeature(session).touch(
            new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path copy = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new DropboxTouchFeature(session).touch(copy, new TransferStatus());
    new DropboxCopyFeature(session).copy(test, copy, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener());
    // Both source and target must exist after the copy.
    final Find find = new DefaultFindFeature(session);
    assertTrue(find.find(test));
    assertTrue(find.find(copy));
    // Cleanup.
    new DropboxDeleteFeature(session).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
@Override public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding, final boolean endOfStream, ChannelPromise promise) { promise = promise.unvoid(); final Http2Stream stream; try { stream = requireStream(streamId); // Verify that the stream is in the appropriate state for sending DATA frames. switch (stream.state()) { case OPEN: case HALF_CLOSED_REMOTE: // Allowed sending DATA frames in these states. break; default: throw new IllegalStateException("Stream " + stream.id() + " in unexpected state " + stream.state()); } } catch (Throwable e) { data.release(); return promise.setFailure(e); } // Hand control of the frame to the flow controller. flowController().addFlowControlled(stream, new FlowControlledData(stream, data, padding, endOfStream, promise)); return promise; }
@Test
public void dataWriteShouldSucceed() throws Exception {
    createStream(STREAM_ID, false);
    final ByteBuf data = dummyData();
    ChannelPromise p = newPromise();
    encoder.writeData(ctx, STREAM_ID, data, 0, true, p);
    // The 8-byte payload is queued with the flow controller, then fully written.
    assertEquals(8, payloadCaptor.getValue().size());
    payloadCaptor.getValue().write(ctx, 8);
    assertEquals(0, payloadCaptor.getValue().size());
    assertEquals("abcdefgh", writtenData.get(0));
    // The buffer must be released after the write and the promise completed.
    assertEquals(0, data.refCnt());
    assertTrue(p.isSuccess());
}
/**
 * Bootstraps traffic-control (tc) state for the given network device.
 * With NM recovery enabled, existing tc configuration is reused if it is
 * already complete; otherwise (or with recovery disabled) tc state is wiped
 * and re-initialized from scratch.
 *
 * @param device            network interface to shape; must not be null
 * @param rootBandwidthMbit total bandwidth of the root class in Mbit
 * @param yarnBandwidthMbit bandwidth reserved for YARN in Mbit
 * @throws ResourceHandlerException on missing config or filesystem failure
 */
public void bootstrap(String device, int rootBandwidthMbit, int yarnBandwidthMbit) throws ResourceHandlerException {
    if (device == null) {
        throw new ResourceHandlerException("device cannot be null!");
    }
    String tmpDirBase = conf.get("hadoop.tmp.dir");
    if (tmpDirBase == null) {
        throw new ResourceHandlerException("hadoop.tmp.dir not set!");
    }
    // tc rule files are staged under hadoop.tmp.dir/nm-tc-rules.
    tmpDirPath = tmpDirBase + "/nm-tc-rules";
    File tmpDir = new File(tmpDirPath);
    if (!(tmpDir.exists() || tmpDir.mkdirs())) {
        LOG.warn("Unable to create directory: " + tmpDirPath);
        throw new ResourceHandlerException("Unable to create directory: " + tmpDirPath);
    }
    this.device = device;
    this.rootBandwidthMbit = rootBandwidthMbit;
    this.yarnBandwidthMbit = yarnBandwidthMbit;
    // Default class gets what is left after YARN's share; if YARN would consume
    // everything (or more), fall back to the full root bandwidth.
    defaultClassBandwidthMbit = (rootBandwidthMbit - yarnBandwidthMbit) <= 0 ? rootBandwidthMbit : (rootBandwidthMbit - yarnBandwidthMbit);
    boolean recoveryEnabled = conf.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED);
    String state = null;
    if (!recoveryEnabled) {
        LOG.info("NM recovery is not enabled. We'll wipe tc state before proceeding.");
    } else {
        // NM recovery enabled - run a state check
        state = readState();
        if (checkIfAlreadyBootstrapped(state)) {
            LOG.info("TC configuration is already in place. Not wiping state.");
            // We already have the list of existing container classes, if any,
            // that were created after bootstrapping.
            reacquireContainerClasses(state);
            return;
        } else {
            LOG.info("TC configuration is incomplete. Wiping tc state before proceeding");
        }
    }
    wipeState(); // start over in case a previous bootstrap was incomplete
    initializeState();
}
@Test
public void testBootstrapRecoveryDisabled() {
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false);
    TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock);
    try {
        trafficController.bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT);
        ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass(PrivilegedOperation.class);
        //NM_RECOVERY_DISABLED - so we expect two privileged operation executions
        //one for wiping tc state - a second for initializing state
        verify(privilegedOperationExecutorMock, times(2)).executePrivilegedOperation(opCaptor.capture(), eq(false));
        //Now verify that the two operations were correct
        List<PrivilegedOperation> ops = opCaptor.getAllValues();
        verifyTrafficControlOperation(ops.get(0), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(WIPE_STATE_CMD));
        verifyTrafficControlOperation(ops.get(1), PrivilegedOperation.OperationType.TC_MODIFY_STATE,
                Arrays.asList(ADD_ROOT_QDISC_CMD, ADD_CGROUP_FILTER_CMD, ADD_ROOT_CLASS_CMD, ADD_DEFAULT_CLASS_CMD, ADD_YARN_CLASS_CMD));
    } catch (ResourceHandlerException | PrivilegedOperationException | IOException e) {
        // Any exception here means bootstrap failed unexpectedly.
        LOG.error("Unexpected exception: " + e);
        Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName());
    }
}
/**
 * Appends the given Boolean to the array. A {@code null} argument is stored
 * as {@code JsonNull.INSTANCE} rather than a raw null element.
 */
public void add(Boolean bool) {
    elements.add(bool != null ? new JsonPrimitive(bool) : JsonNull.INSTANCE);
}
@Test
public void testToString() {
    JsonArray array = new JsonArray();
    assertThat(array.toString()).isEqualTo("[]");
    // Mix of null, non-finite number, NUL-containing string, nested array and object
    // exercises JSON escaping and lenient serialization in toString().
    array.add(JsonNull.INSTANCE);
    array.add(Float.NaN);
    array.add("a\0");
    JsonArray nestedArray = new JsonArray();
    nestedArray.add('"');
    array.add(nestedArray);
    JsonObject nestedObject = new JsonObject();
    nestedObject.addProperty("n\0", 1);
    array.add(nestedObject);
    assertThat(array.toString()).isEqualTo("[null,NaN,\"a\\u0000\",[\"\\\"\"],{\"n\\u0000\":1}]");
}
/**
 * Builds the Gatling-facing KarateProtocol from this DSL configuration,
 * wiring the URI patterns, optional name resolver, and runner.
 */
@Override
public KarateProtocol protocol() {
    final KarateProtocol result = new KarateProtocol(uriPatterns);
    if (nameResolver != null) {
        // Bridge the Java BiFunction into the Scala-generated property setter.
        result.nameResolver_$eq((req, sr) -> nameResolver.apply(req, sr));
    }
    result.runner_$eq(runner);
    return result;
}
@Test
void uriPatterns() {
    // "foo" has no pauses; "bar" pauses per HTTP method.
    KarateProtocol protocol = KarateDsl.karateProtocol(
            KarateDsl.uri("foo").nil(),
            KarateDsl.uri("bar").pauseFor("get", 110, "post", 220)
    ).protocol();
    // Configured methods return their pause, unconfigured ones return 0.
    assertEquals(110, protocol.pauseFor("bar", "get"));
    assertEquals(220, protocol.pauseFor("bar", "post"));
    assertEquals(0, protocol.pauseFor("bar", "put"));
    assertEquals(0, protocol.pauseFor("foo", "get"));
    assertEquals(0, protocol.pauseFor("foobar", "get"));
    // Path matching is exact per registered pattern; "foobar" matches neither.
    assertTrue(protocol.pathMatches("/foo").isDefined());
    assertTrue(protocol.pathMatches("/bar").isDefined());
    assertFalse(protocol.pathMatches("/foobar").isDefined());
}
/**
 * Returns the cell at the given 1-based column index (JDBC-style indexing).
 *
 * @param columnIndex 1-based index; must be within [1, data.size()]
 * @return the stored cell value
 * @throws IllegalArgumentException if the index is out of range
 */
public Object getCell(final int columnIndex) {
    // Use `<= data.size()` rather than `< data.size() + 1`: equivalent for normal
    // sizes but immune to int overflow when size() == Integer.MAX_VALUE, and clearer.
    Preconditions.checkArgument(columnIndex > 0 && columnIndex <= data.size());
    return data.get(columnIndex - 1);
}
@Test
void assertGetCellWithBooleanValue() {
    // Boolean cells are rendered as their lowercase string form.
    LocalDataQueryResultRow actual = new LocalDataQueryResultRow(true, Boolean.FALSE);
    assertThat(actual.getCell(1), is("true"));
    assertThat(actual.getCell(2), is("false"));
}
/**
 * Parses an RFC822 / MIME email message with Apache Mime4j, emitting XHTML
 * through the supplied handler and populating message metadata.
 * A Detector is taken from the ParseContext when present, otherwise resolved
 * lazily and cached on this parser instance.
 */
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException {
    // Get the mime4j configuration, or use a default one with generous
    // line/header limits for real-world messages.
    MimeConfig config = new MimeConfig.Builder().setMaxLineLen(100000).setMaxHeaderLen(100000).build();
    config = context.get(MimeConfig.class, config);
    Detector localDetector = context.get(Detector.class);
    if (localDetector == null) {
        // lazily load this if necessary
        if (detector == null) {
            EmbeddedDocumentUtil embeddedDocumentUtil = new EmbeddedDocumentUtil(context);
            detector = embeddedDocumentUtil.getDetector();
        }
        localDetector = detector;
    }
    MimeStreamParser parser = new MimeStreamParser(config, null, new DefaultBodyDescriptorBuilder());
    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    MailContentHandler mch = new MailContentHandler(xhtml, localDetector, metadata, context, config.isStrictParsing(), extractAllAlternatives);
    parser.setContentHandler(mch);
    parser.setContentDecoding(true);
    // Top-level parse only; embedded parts are handled by the content handler.
    parser.setNoRecurse();
    xhtml.startDocument();
    TikaInputStream tstream = TikaInputStream.get(stream);
    try {
        parser.parse(tstream);
    } catch (IOException e) {
        // Surface an underlying cause from the Tika stream if there is one.
        tstream.throwIfCauseOf(e);
        throw new TikaException("Failed to parse an email message", e);
    } catch (MimeException e) {
        // Unwrap the exception in case it was not thrown by mime4j
        Throwable cause = e.getCause();
        if (cause instanceof TikaException) {
            throw (TikaException) cause;
        } else if (cause instanceof SAXException) {
            throw (SAXException) cause;
        } else {
            throw new TikaException("Failed to parse an email message", e);
        }
    }
    xhtml.endDocument();
}
@Test
public void testSomeMissingHeaders() throws Exception {
    // A message with only a subset of headers must still yield multi-valued
    // creator/from/to metadata and the subject mapped to both title fields.
    Metadata metadata = new Metadata();
    InputStream stream = getStream("test-documents/testRFC822-limitedheaders");
    ContentHandler handler = new BodyContentHandler();
    ParseContext context = new ParseContext();
    context.set(Parser.class, EXTRACT_ALL_ALTERNATIVES_PARSER);
    EXTRACT_ALL_ALTERNATIVES_PARSER.parse(stream, handler, metadata, context);
    assertEquals(true, metadata.isMultiValued(TikaCoreProperties.CREATOR));
    assertEquals("xyz", metadata.getValues(TikaCoreProperties.CREATOR)[0]);
    assertEquals("abc", metadata.getValues(TikaCoreProperties.CREATOR)[1]);
    assertEquals(true, metadata.isMultiValued(Metadata.MESSAGE_FROM));
    assertEquals("xyz", metadata.getValues(Metadata.MESSAGE_FROM)[0]);
    assertEquals("abc", metadata.getValues(Metadata.MESSAGE_FROM)[1]);
    assertEquals(true, metadata.isMultiValued(Metadata.MESSAGE_TO));
    assertEquals("abc", metadata.getValues(Metadata.MESSAGE_TO)[0]);
    assertEquals("def", metadata.getValues(Metadata.MESSAGE_TO)[1]);
    assertEquals("abcd", metadata.get(TikaCoreProperties.TITLE));
    assertEquals("abcd", metadata.get(TikaCoreProperties.SUBJECT));
    assertContains("bar biz bat", handler.toString());
}
/**
 * Records each plugin lifecycle transition at debug level for troubleshooting.
 */
@Override
public void pluginStateChanged(PluginStateEvent event) {
    final Object pluginId = event.getPlugin().getPluginId();
    log.debug("The state of plugin '{}' has changed from '{}' to '{}'",
            pluginId, event.getOldState(), event.getPluginState());
}
@Test
void pluginStateChangedShouldLogStateChange() {
    // Intercept LoggerFactory so the listener's static logger is our mock.
    Logger mockedLogger = mock(Logger.class);
    try (MockedStatic<LoggerFactory> context = Mockito.mockStatic(LoggerFactory.class)) {
        context.when(() -> LoggerFactory.getLogger(Mockito.any(Class.class)))
                .thenReturn(mockedLogger);
        // create a PluginStateEvent
        PluginManager pluginManager = mock(PluginManager.class);
        PluginWrapper pluginWrapper = mock(PluginWrapper.class);
        when(pluginWrapper.getPluginId()).thenReturn("testPlugin");
        when(pluginWrapper.getPluginState()).thenReturn(PluginState.STARTED);
        PluginStateEvent event = new PluginStateEvent(pluginManager, pluginWrapper, PluginState.CREATED);
        // call the method under test
        LoggingPluginStateListener listener = new LoggingPluginStateListener();
        listener.pluginStateChanged(event);
        // verify that the logger was called with the expected message
        verify(mockedLogger).debug("The state of plugin '{}' has changed from '{}' to '{}'",
                "testPlugin", PluginState.CREATED, PluginState.STARTED);
    }
}
/**
 * Runs one scheduling pass over all lifespan schedule groups: fetches new split
 * batches, computes node placements, assigns placed splits to tasks, and decides
 * whether scheduling is finished, can continue, or must block (and on what).
 * Synchronized: state and scheduleGroups are mutated by this pass.
 */
@Override
public synchronized ScheduleResult schedule() {
    dropListenersFromWhenFinishedOrNewLifespansAdded();

    int overallSplitAssignmentCount = 0;
    ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
    List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();

    boolean anyBlockedOnPlacements = false;
    boolean anyBlockedOnNextSplitBatch = false;
    boolean anyNotBlocked = false;

    for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
        Lifespan lifespan = entry.getKey();
        ScheduleGroup scheduleGroup = entry.getValue();

        if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) {
            // Terminal states must not have an in-flight batch fetch.
            verify(scheduleGroup.nextSplitBatchFuture == null);
        }
        else if (scheduleGroup.pendingSplits.isEmpty()) {
            // try to get the next batch
            if (scheduleGroup.nextSplitBatchFuture == null) {
                scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize);

                long start = System.nanoTime();
                addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
            }

            if (scheduleGroup.nextSplitBatchFuture.isDone()) {
                SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
                scheduleGroup.nextSplitBatchFuture = null;
                scheduleGroup.pendingSplits = new HashSet<>(nextSplits.getSplits());
                if (nextSplits.isLastBatch()) {
                    if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && scheduleGroup.pendingSplits.isEmpty()) {
                        // Add an empty split in case no splits have been produced for the source.
                        // For source operators, they never take input, but they may produce output.
                        // This is well handled by Presto execution engine.
                        // However, there are certain non-source operators that may produce output without any input,
                        // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
                        // Scheduling an empty split kicks off necessary driver instantiation to make this work.
                        scheduleGroup.pendingSplits.add(new Split(
                                splitSource.getConnectorId(),
                                splitSource.getTransactionHandle(),
                                new EmptySplit(splitSource.getConnectorId()),
                                lifespan,
                                NON_CACHEABLE));
                    }
                    scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
                }
            }
            else {
                // Batch not ready yet: this group blocks on the split source.
                overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
                anyBlockedOnNextSplitBatch = true;
                continue;
            }
        }

        Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of();
        if (!scheduleGroup.pendingSplits.isEmpty()) {
            if (!scheduleGroup.placementFuture.isDone()) {
                anyBlockedOnPlacements = true;
                continue;
            }

            if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
                scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
            }
            if (state == State.INITIALIZED) {
                state = State.SPLITS_ADDED;
            }

            // calculate placements for splits
            SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(scheduleGroup.pendingSplits);
            splitAssignment = splitPlacementResult.getAssignments();

            // remove splits with successful placements
            splitAssignment.values().forEach(scheduleGroup.pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
            overallSplitAssignmentCount += splitAssignment.size();

            // if not completed placed, mark scheduleGroup as blocked on placement
            if (!scheduleGroup.pendingSplits.isEmpty()) {
                scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
                overallBlockedFutures.add(scheduleGroup.placementFuture);
                anyBlockedOnPlacements = true;
            }
        }

        // if no new splits will be assigned, update state and attach completion event
        Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
        if (scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
            scheduleGroup.state = ScheduleGroupState.DONE;
            if (!lifespan.isTaskWide()) {
                InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId());
                noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
            }
        }

        // assign the splits with successful placements
        overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));

        // Assert that "placement future is not done" implies "pendingSplits is not empty".
        // The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
        // However, there are other reasons that could lead to this.
        // Note that `computeAssignments` is quite broken:
        // 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
        // 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
        // As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
        if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) {
            anyNotBlocked = true;
        }
    }

    // * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
    //   If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
    // * Even if `splitSource.isFinished()` return true, it is not necessarily safe to tear down the split source.
    //   * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
    //     which may contain recently published splits. We must not ignore those.
    //   * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
    //     Next time it invokes getNextBatch, it will realize that. However, the invocation will fail we tear down splitSource now.
    //
    // Since grouped execution is going to support failure recovery, and scheduled splits might have to be rescheduled during retry,
    // we can no longer claim schedule is complete after all splits are scheduled.
    // Splits schedule can only be considered as finished when all lifespan executions are done
    // (by calling `notifyAllLifespansFinishedExecution`)
    if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (!groupedExecution && lifespanAdded && scheduleGroups.isEmpty() && splitSource.isFinished())) {
        switch (state) {
            case INITIALIZED:
                // We have not scheduled a single split so far.
                // But this shouldn't be possible. See usage of EmptySplit in this method.
                throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
            case SPLITS_ADDED:
                state = State.NO_MORE_SPLITS;
                splitSource.close();
                // fall through
            case NO_MORE_SPLITS:
                state = State.FINISHED;
                whenFinishedOrNewLifespanAdded.set(null);
                // fall through
            case FINISHED:
                return ScheduleResult.nonBlocked(
                        true,
                        overallNewTasks.build(),
                        overallSplitAssignmentCount);
            default:
                throw new IllegalStateException("Unknown state");
        }
    }

    if (anyNotBlocked) {
        return ScheduleResult.nonBlocked(false, overallNewTasks.build(), overallSplitAssignmentCount);
    }

    if (anyBlockedOnPlacements) {
        // In a broadcast join, output buffers of the tasks in build source stage have to
        // hold onto all data produced before probe side task scheduling finishes,
        // even if the data is acknowledged by all known consumers. This is because
        // new consumers may be added until the probe side task scheduling finishes.
        //
        // As a result, the following line is necessary to prevent deadlock
        // due to neither build nor probe can make any progress.
        // The build side blocks due to a full output buffer.
        // In the meantime the probe side split cannot be consumed since
        // builder side hash table construction has not finished.
        //
        // TODO: When SourcePartitionedScheduler is used as a SourceScheduler, it shouldn't need to worry about
        //  task scheduling and creation -- these are done by the StageScheduler.
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    }

    ScheduleResult.BlockedReason blockedReason;
    if (anyBlockedOnNextSplitBatch) {
        blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE;
    }
    else {
        blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
    }

    overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
    return ScheduleResult.blocked(
            false,
            overallNewTasks.build(),
            nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)),
            blockedReason,
            overallSplitAssignmentCount);
}
@Test
public void testScheduleSplitsBlock() {
    // 80 splits, 3 nodes, split queue limit of 20 per node => scheduler blocks
    // after 60 assigned splits until a node frees capacity.
    SubPlan plan = createPlan();
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    SqlStageExecution stage = createSqlStageExecution(plan, nodeTaskMap);
    StageScheduler scheduler = getSourcePartitionedScheduler(createFixedSplitSource(80, TestingSplit::createRemoteSplit), stage, nodeManager, nodeTaskMap, 1);

    // schedule first 60 splits, which will cause the scheduler to block
    for (int i = 0; i <= 60; i++) {
        ScheduleResult scheduleResult = scheduler.schedule();
        assertFalse(scheduleResult.isFinished());

        // blocks at 20 per node
        assertEquals(scheduleResult.getBlocked().isDone(), i != 60);

        // first three splits create new tasks
        assertEquals(scheduleResult.getNewTasks().size(), i < 3 ? 1 : 0);
        assertEquals(stage.getAllTasks().size(), i < 3 ? i + 1 : 3);
        assertPartitionedSplitCount(stage, min(i + 1, 60));
    }

    for (RemoteTask remoteTask : stage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 20);
    }

    // todo rewrite MockRemoteTask to fire a state transition when splits are cleared, and then validate blocked future completes
    // drop the 20 splits from one node
    ((MockRemoteTask) stage.getAllTasks().get(0)).clearSplits();

    // schedule remaining 20 splits
    for (int i = 0; i < 20; i++) {
        ScheduleResult scheduleResult = scheduler.schedule();

        // finishes when last split is fetched
        if (i == 19) {
            assertEffectivelyFinished(scheduleResult, scheduler);
        }
        else {
            assertFalse(scheduleResult.isFinished());
        }

        // does not block again
        assertTrue(scheduleResult.getBlocked().isDone());

        // no additional tasks will be created
        assertEquals(scheduleResult.getNewTasks().size(), 0);
        assertEquals(stage.getAllTasks().size(), 3);

        // we dropped 20 splits so start at 40 and count to 60
        assertPartitionedSplitCount(stage, min(i + 41, 60));
    }

    for (RemoteTask remoteTask : stage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 20);
    }

    stage.abort();
}
@Override public void updateGroup(MemberGroupUpdateReqVO updateReqVO) { // 校验存在 validateGroupExists(updateReqVO.getId()); // 更新 MemberGroupDO updateObj = MemberGroupConvert.INSTANCE.convert(updateReqVO); memberGroupMapper.updateById(updateObj); }
@Test
public void testUpdateGroup_notExists() {
    // A random id is not persisted, so the update must fail with GROUP_NOT_EXISTS.
    MemberGroupUpdateReqVO reqVO = randomPojo(MemberGroupUpdateReqVO.class);
    assertServiceException(() -> groupService.updateGroup(reqVO), GROUP_NOT_EXISTS);
}
/**
 * Creates the clock implementation selected by system properties:
 * an explicit implementation class wins, then a non-zero static offset,
 * otherwise the plain system clock.
 */
static ClockImpl createClock() {
    final String implClassName = System.getProperty(ClockProperties.HAZELCAST_CLOCK_IMPL);
    if (implClassName != null) {
        try {
            return ClassLoaderUtil.newInstance(null, implClassName);
        } catch (Exception e) {
            throw rethrow(e);
        }
    }
    final String offsetProperty = System.getProperty(ClockProperties.HAZELCAST_CLOCK_OFFSET);
    if (offsetProperty != null) {
        final long offset;
        try {
            offset = Long.parseLong(offsetProperty);
        } catch (NumberFormatException e) {
            // Propagate malformed offsets instead of silently ignoring them.
            throw rethrow(e);
        }
        if (offset != 0L) {
            return new SystemOffsetClock(offset);
        }
    }
    return new SystemClock();
}
@Test
public void testCreateClock_withDefaults() {
    // With no clock-related system properties set, the plain SystemClock is used.
    Clock.ClockImpl clock = Clock.createClock();
    assertInstanceOf(Clock.SystemClock.class, clock);
}
/**
 * Finds the bundled preset plugin with the given metadata name.
 * Emits the first match, or completes empty when none matches.
 */
@Override
public Mono<Plugin> getPreset(String presetName) {
    return getPresets()
        .filter(preset -> Objects.equals(presetName, preset.getMetadata().getName()))
        .next();
}
@Test
void getPresetIfNotFound() {
    // An unknown preset name must complete empty, not error.
    var plugin = pluginService.getPreset("not-found-plugin");
    StepVerifier.create(plugin)
        .verifyComplete();
}
@Override public void process(Exchange exchange) throws Exception { try { plc4XEndpoint.reconnectIfNeeded(); } catch (PlcConnectionException e) { if (log.isTraceEnabled()) { log.warn("Unable to reconnect, skipping request", e); } else { log.warn("Unable to reconnect, skipping request"); } return; } Message in = exchange.getIn(); Object body = in.getBody(); PlcWriteRequest plcWriteRequest; if (body instanceof Map) { //Check if we have a Map Map<String, Map<String, Object>> tags = (Map<String, Map<String, Object>>) body; plcWriteRequest = plc4XEndpoint.buildPlcWriteRequest(tags); } else { throw new PlcInvalidTagException("The body must contain a Map<String,Map<String,Object>"); } CompletableFuture<? extends PlcWriteResponse> completableFuture = plcWriteRequest.execute(); int currentlyOpenRequests = openRequests.incrementAndGet(); try { log.debug("Currently open requests including {}:{}", exchange, currentlyOpenRequests); Object plcWriteResponse = completableFuture.get(5000, TimeUnit.MILLISECONDS); if (exchange.getPattern().isOutCapable()) { Message out = exchange.getMessage(); out.copyFrom(exchange.getIn()); out.setBody(plcWriteResponse); } else { in.setBody(plcWriteResponse); } } finally { int openRequestsAfterFinish = openRequests.decrementAndGet(); log.trace("Open Requests after {}:{}", exchange, openRequestsAfterFinish); } }
@Test
public void process() throws Exception {
    // Exercise both exchange patterns: InOnly writes back to the in message,
    // InOut copies into the out message.
    when(testExchange.getPattern()).thenReturn(ExchangePattern.InOnly);
    sut.process(testExchange);
    when(testExchange.getPattern()).thenReturn(ExchangePattern.InOut);
    sut.process(testExchange);
    when(testExchange.getIn().getBody()).thenReturn(2);
}
/**
 * Returns the last (timestamp, offset) entry written to this index.
 * For an empty index this is the initial sentinel entry — presumably
 * (NO_TIMESTAMP, baseOffset); confirm against the field initializer.
 */
public TimestampOffset lastEntry() {
    return lastEntry;
}
@Test
public void testLastEntry() {
    // Empty index exposes the sentinel entry; appending advances it.
    assertEquals(new TimestampOffset(RecordBatch.NO_TIMESTAMP, baseOffset), idx.lastEntry());
    idx.maybeAppend(1, 1 + baseOffset);
    assertEquals(new TimestampOffset(1, baseOffset + 1), idx.lastEntry());
}
/**
 * Hash code over the (appName, path) pair — must stay consistent with equals.
 * Note: a sibling test pins the concrete hash value, so the hashing scheme
 * must not change without updating that test.
 */
@Override
public int hashCode() {
    return Objects.hash(appName, path);
}
@Test
public void testHashCode() {
    // NOTE(review): asserting a hard-coded hash value is brittle — it depends on
    // String.hashCode and Objects.hash internals plus the fixture's field values;
    // confirm this pinning is intentional rather than asserting equals-consistency.
    final int result = authPathApplyDTOUnderTest.hashCode();
    assertEquals(2057813462, result);
}
/**
 * Returns the block index interval read from the CHM ITSP header.
 */
public int getBlockidx_intvl() {
    return blockidx_intvl;
}
@Test
public void testGetBlockidx_intvl() {
    // Header parsed in the fixture must expose the known block index interval.
    assertEquals(TestParameters.VP_BLOCK_INDEX_INTERVAL, chmItspHeader.getBlockidx_intvl());
}
void handleConfigDataChange(Event event) { // Generate ConfigDataChangeEvent concurrently if (event instanceof ConfigDataChangeEvent) { ConfigDataChangeEvent evt = (ConfigDataChangeEvent) event; DumpRequest dumpRequest = DumpRequest.create(evt.dataId, evt.group, evt.tenant, evt.lastModifiedTs, NetUtils.localIP()); dumpRequest.setBeta(evt.isBeta); dumpRequest.setBatch(evt.isBatch); dumpRequest.setTag(evt.tag); DumpService.this.dump(dumpRequest); } }
@Test
void testHandleConfigDataChange() {
    // A change event must enqueue exactly one DumpTask keyed by (dataId, group, tenant).
    ConfigDataChangeEvent configDataChangeEvent = new ConfigDataChangeEvent("dataId", "group", System.currentTimeMillis());
    ReflectionTestUtils.setField(dumpService, "dumpTaskMgr", dumpTaskMgr);
    Mockito.doNothing().when(dumpTaskMgr).addTask(any(), any());
    dumpService.handleConfigDataChange(configDataChangeEvent);
    Mockito.verify(dumpTaskMgr, times(1))
            .addTask(eq(GroupKey.getKeyTenant(configDataChangeEvent.dataId, configDataChangeEvent.group, configDataChangeEvent.tenant)), any(DumpTask.class));
}
/**
 * Configures the remote storage manager (RSM) and remote log metadata manager
 * (RLMM), then marks this manager as configured. Configuration may start
 * resources that connect to brokers or remote storages.
 */
public void startup() {
    // Initialize and configure RSM and RLMM. This will start RSM, RLMM resources which may need to start resources
    // in connecting to the brokers or remote storages.
    configureRSM();
    configureRLMM();
    remoteLogManagerConfigured = true;
}
@Test
void testStartup() {
    remoteLogManager.startup();
    ArgumentCaptor<Map<String, Object>> capture = ArgumentCaptor.forClass(Map.class);
    // RSM must receive broker.id plus remote-log-storage-prefixed configs.
    verify(remoteStorageManager, times(1)).configure(capture.capture());
    assertEquals(brokerId, capture.getValue().get("broker.id"));
    assertEquals(remoteLogStorageTestVal, capture.getValue().get(remoteLogStorageTestProp));
    // RLMM must receive broker.id and log.dir.
    verify(remoteLogMetadataManager, times(1)).configure(capture.capture());
    assertEquals(brokerId, capture.getValue().get("broker.id"));
    assertEquals(logDir, capture.getValue().get("log.dir"));
    // verify the configs starting with "remote.log.metadata", "remote.log.metadata.common.client."
    // "remote.log.metadata.producer.", and "remote.log.metadata.consumer." are correctly passed in
    assertEquals(remoteLogMetadataTopicPartitionsNum, capture.getValue().get(REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP));
    assertEquals(remoteLogMetadataTestVal, capture.getValue().get(remoteLogMetadataTestProp));
    assertEquals(remoteLogMetadataConsumerTestVal, capture.getValue().get(remoteLogMetadataConsumerTestProp));
    assertEquals(remoteLogMetadataProducerTestVal, capture.getValue().get(remoteLogMetadataProducerTestProp));
    assertEquals(remoteLogMetadataCommonClientTestVal, capture.getValue().get(remoteLogMetadataCommonClientTestProp));
}
/**
 * Streams the blocks from the configured block files, deserializing each raw
 * buffer into a Block lazily as the stream is consumed.
 */
public Stream<Block> stream() {
    return streamBuffers()
            .map(buffer -> serializer.makeBlock(buffer));
}
@Test
public void streamFirst100kCount() {
    // The bundled first-100k-blocks.dat fixture contains exactly 439 blocks.
    File blockFile = new File(getClass().getResource("../core/first-100k-blocks.dat").getFile());
    BlockFileLoader loader = new BlockFileLoader(BitcoinNetwork.MAINNET, Collections.singletonList(blockFile));
    long blockCount = loader.stream().count();
    assertEquals(439, blockCount);
}
public static int getWeightedRandom(ArrayList<Float> weights) { int i; float totalWeight = 0; for (i = 0; i < weights.size(); i++) { if (weights.get(i) > 0) { totalWeight += weights.get(i); } } if (totalWeight == 0) { return -1; } float samplePoint = rand.nextFloat() * totalWeight; int lastIndex = 0; for (i = 0; i < weights.size(); i++) { if (weights.get(i) > 0) { if (samplePoint <= weights.get(i)) { return i; } else { lastIndex = i; samplePoint -= weights.get(i); } } } // This can only happen if samplePoint is very close to totalWeight and // float rounding kicks in during subtractions return lastIndex; }
@Test
public void testGetWeightedRandom() {
    // Draw ten million samples and check each index's empirical frequency is
    // within 1% of its normalized weight; negative weights must never be drawn.
    int i;
    float[] weights = new float[] {0, 0.1f, 0.2f, 0.2f, -0.1f, 0.1f, 0.2f, 0.1f, 0.1f};
    float[] expectedWeights = new float[] {0, 0.1f, 0.2f, 0.2f, 0, 0.1f, 0.2f, 0.1f, 0.1f};
    int[] result = new int[weights.length];
    ArrayList<Float> weightsList = new ArrayList<>();
    for (float weight : weights) {
        weightsList.add(weight);
    }
    int n = 10000000;
    for (i = 0; i < n; i++) {
        int sample = FederationPolicyUtils.getWeightedRandom(weightsList);
        result[sample]++;
    }
    for (i = 0; i < weights.length; i++) {
        double actualWeight = (float) result[i] / n;
        System.out.println(i + " " + actualWeight);
        Assert.assertTrue(
                "Index " + i + " Actual weight: " + actualWeight + " expected weight: " + expectedWeights[i],
                Math.abs(actualWeight - expectedWeights[i]) < 0.01);
    }
}
/**
 * Deletes a permission (resource + action) from the given role.
 * Requires console write access on the permissions resource.
 *
 * @param role     role to remove the permission from
 * @param resource protected resource identifier
 * @param action   action (e.g. read/write) being revoked
 * @return a success RestResult acknowledging the deletion
 */
@DeleteMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "permissions", action = ActionTypes.WRITE)
public Object deletePermission(@RequestParam String role, @RequestParam String resource, @RequestParam String action) {
    nacosRoleService.deletePermission(role, resource, action);
    return RestResultUtils.success("delete permission ok!");
}
@Test
void testDeletePermission() {
    // The controller must delegate to the role service exactly once and return 200.
    RestResult<String> result = (RestResult<String>) permissionController.deletePermission("admin", "test", "test");
    verify(nacosRoleService, times(1)).deletePermission(anyString(), anyString(), anyString());
    assertEquals(200, result.getCode());
}
@Udf(description = "Returns the correctly rounded positive square root of a DOUBLE value")
public Double sqrt(
    @UdfParameter(
        value = "value",
        description = "The value to get the square root of."
    )
    final Integer value
) {
    // Widen to Double (propagating null) and delegate to the Double overload.
    final Double widened = (value == null) ? null : value.doubleValue();
    return sqrt(widened);
}
@Test
public void shouldHandleNull() {
    // Every overload must propagate a null argument as a null result.
    assertThat(udf.sqrt((Integer) null), is(nullValue()));
    assertThat(udf.sqrt((Long) null), is(nullValue()));
    assertThat(udf.sqrt((Double) null), is(nullValue()));
}
/**
 * Converts the given value into a List.
 * NOTE(review): the {@code schema} parameter is not consulted — conversion
 * always uses ARRAY_SELECTOR_SCHEMA; confirm this is intentional.
 */
public static List<?> convertToList(Schema schema, Object value) {
    return convertToArray(ARRAY_SELECTOR_SCHEMA, value);
}
@Test
public void shouldConvertStringOfListWithOnlyNumericElementTypesIntoListOfLargestNumericType() {
    // Short.MAX_VALUE + 1 forces the inferred common element type up to int.
    int thirdValue = Short.MAX_VALUE + 1;
    List<?> list = Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, " + thirdValue + "]");
    assertEquals(3, list.size());
    assertEquals(1, ((Number) list.get(0)).intValue());
    assertEquals(2, ((Number) list.get(1)).intValue());
    assertEquals(thirdValue, list.get(2));
}
/**
 * Removes the given {@link FactMapping} from this model: first clears all data
 * associated with the mapping, then drops the mapping itself from the descriptor.
 * The order matters — data is cleared while the mapping is still registered.
 *
 * @param toRemove the fact mapping to remove
 */
public void removeFactMapping(FactMapping toRemove) { clearDatas(toRemove); scesimModelDescriptor.removeFactMapping(toRemove); }
/**
 * Removing a fact mapping must clear its data exactly once and shrink the descriptor's
 * mapping list so it no longer contains the removed mapping.
 */
@Test
public void removeFactMapping() {
    // Pick an arbitrary existing mapping to remove.
    final FactMapping toRemove = model.scesimModelDescriptor.getFactMappingByIndex(2);

    model.removeFactMapping(toRemove);

    verify(model, times(1)).clearDatas(eq(toRemove));
    assertThat(model.scesimModelDescriptor.getFactMappings())
        .hasSize(FACT_MAPPINGS - 1)
        .doesNotContain(toRemove);
}
/**
 * Prepares fetch requests for the partitions currently being consumed and hands them to the
 * network layer for sending.
 *
 * <p>The method itself is {@code synchronized} on this fetcher, and both the success and the
 * failure callbacks re-acquire the same {@code Fetcher.this} monitor before touching fetcher
 * state, because those callbacks run later on the network client's poll thread — without the
 * nested synchronization the response handling would race with other fetcher operations.
 *
 * @return the number of fetch requests prepared (one per target broker node)
 */
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
@Test public void testTruncationDetected() { // Create some records that include a leader epoch (1) MemoryRecordsBuilder builder = MemoryRecords.builder( ByteBuffer.allocate(1024), RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, 1 // record epoch is earlier than the leader epoch on the client ); builder.appendWithOffset(0L, 0L, "key".getBytes(), "value-1".getBytes()); builder.appendWithOffset(1L, 0L, "key".getBytes(), "value-2".getBytes()); builder.appendWithOffset(2L, 0L, "key".getBytes(), "value-3".getBytes()); MemoryRecords records = builder.build(); buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(singleton(tp0)); // Initialize the epoch=2 Map<String, Integer> partitionCounts = new HashMap<>(); partitionCounts.put(tp0.topic(), 4); MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), partitionCounts, tp -> 2, topicIds); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L); // Offset validation requires OffsetForLeaderEpoch request v3 or higher Node node = metadata.fetch().nodes().get(0); apiVersions.update(node.idString(), NodeApiVersions.create()); // Seek Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(metadata.currentLeader(tp0).leader, Optional.of(1)); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.of(1), leaderAndEpoch)); // Check for truncation, this should cause tp0 to go into validation OffsetFetcher offsetFetcher = new OffsetFetcher(new LogContext(), consumerClient, metadata, subscriptions, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions); offsetFetcher.validatePositionsIfNeeded(); // No fetches sent since we entered 
validation assertEquals(0, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); assertTrue(subscriptions.awaitingValidation(tp0)); // Prepare OffsetForEpoch response then check that we update the subscription position correctly. client.prepareResponse(prepareOffsetsForLeaderEpochResponse(tp0, Errors.NONE, 1, 10L)); consumerClient.pollNoWakeup(); assertFalse(subscriptions.awaitingValidation(tp0)); // Fetch again, now it works assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0)); consumerClient.pollNoWakeup(); assertTrue(fetcher.hasCompletedFetches()); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); assertEquals(subscriptions.position(tp0).offset, 3L); assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); }
public OffsetRange[] getNextOffsetRanges(Option<String> lastCheckpointStr, long sourceLimit, HoodieIngestionMetrics metrics) { // Come up with final set of OffsetRanges to read (account for new partitions, limit number of events) long maxEventsToReadFromKafka = getLongWithAltKeys(props, KafkaSourceConfig.MAX_EVENTS_FROM_KAFKA_SOURCE); long numEvents; if (sourceLimit == Long.MAX_VALUE) { numEvents = maxEventsToReadFromKafka; LOG.info("SourceLimit not configured, set numEvents to default value : {}", maxEventsToReadFromKafka); } else { numEvents = sourceLimit; } long minPartitions = getLongWithAltKeys(props, KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS); LOG.info("getNextOffsetRanges set config {} to {}", KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), minPartitions); return getNextOffsetRanges(lastCheckpointStr, numEvents, minPartitions, metrics); }
/**
 * With auto.offset.reset=earliest and no checkpoint, ranges start at offset 0; the until
 * offset is capped by the source limit when it is smaller than the topic size, and by the
 * last available offset otherwise.
 */
@Test
public void testGetNextOffsetRangesFromEarliest() {
    // Publish 1000 records to a single-partition topic.
    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator();
    testUtils.createTopic(testTopicName, 1);
    testUtils.sendMessages(testTopicName, Helpers.jsonifyRecords(dataGenerator.generateInserts("000", 1000)));

    KafkaOffsetGen offsetGen = new KafkaOffsetGen(getConsumerConfigs("earliest", KAFKA_CHECKPOINT_TYPE_STRING));

    // Limit (500) below topic size: range is capped at the limit.
    OffsetRange[] ranges = offsetGen.getNextOffsetRanges(Option.empty(), 500, metrics);
    assertEquals(1, ranges.length);
    assertEquals(0, ranges[0].fromOffset());
    assertEquals(500, ranges[0].untilOffset());

    // Limit (5000) above topic size: range is capped at the last available offset.
    ranges = offsetGen.getNextOffsetRanges(Option.empty(), 5000, metrics);
    assertEquals(1, ranges.length);
    assertEquals(0, ranges[0].fromOffset());
    assertEquals(1000, ranges[0].untilOffset());
}
public void createPipe(CreatePipeStmt stmt) throws DdlException { try { lock.writeLock().lock(); Pair<Long, String> dbIdAndName = resolvePipeNameUnlock(stmt.getPipeName()); boolean existed = nameToId.containsKey(dbIdAndName); if (existed) { if (!stmt.isIfNotExists() && !stmt.isReplace()) { ErrorReport.reportSemanticException(ErrorCode.ERR_PIPE_EXISTS); } if (stmt.isIfNotExists()) { return; } else if (stmt.isReplace()) { LOG.info("Pipe {} already exist, replace it with a new one", stmt.getPipeName()); Pipe pipe = pipeMap.get(nameToId.get(dbIdAndName)); dropPipeImpl(pipe); } } // Add pipe long id = GlobalStateMgr.getCurrentState().getNextId(); Pipe pipe = Pipe.fromStatement(id, stmt); putPipe(pipe); repo.addPipe(pipe); } finally { lock.writeLock().unlock(); } }
/**
 * Each supported pipe property (batch_size, batch_files, poll_interval, auto_ingest)
 * must be accepted by CREATE PIPE without error.
 */
@Test
public void testProperty() throws Exception {
    // All statements share the same INSERT ... FROM FILES clause; only the property differs.
    final String insertClause =
        " as insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')";

    createPipe("create pipe p_batch_size properties('batch_size'='10GB') " + insertClause);
    createPipe("create pipe p_batch_files properties('batch_files'='100') " + insertClause);
    createPipe("create pipe p_poll_interval properties('poll_interval'='100') " + insertClause);
    createPipe("create pipe p_auto_ingest properties('auto_ingest'='false') " + insertClause);
}