focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public void deleteConfig(Long id) { // 校验配置存在 ConfigDO config = validateConfigExists(id); // 内置配置,不允许删除 if (ConfigTypeEnum.SYSTEM.getType().equals(config.getType())) { throw exception(CONFIG_CAN_NOT_DELETE_SYSTEM_TYPE); } // 删除 configMapper.deleteById(id); }
@Test public void testDeleteConfig_success() { // mock 数据 ConfigDO dbConfig = randomConfigDO(o -> { o.setType(ConfigTypeEnum.CUSTOM.getType()); // 只能删除 CUSTOM 类型 }); configMapper.insert(dbConfig);// @Sql: 先插入出一条存在的数据 // 准备参数 Long id = dbConfig.getId(); // 调用 configService.deleteConfig(id); // 校验数据不存在了 assertNull(configMapper.selectById(id)); }
public RingbufferConfig setBackupCount(int backupCount) { this.backupCount = checkBackupCount(backupCount, asyncBackupCount); return this; }
@Test(expected = IllegalArgumentException.class) public void setBackupCount_whenTooLarge() { RingbufferConfig ringbufferConfig = new RingbufferConfig(NAME); ringbufferConfig.setBackupCount(MAX_BACKUP_COUNT + 1); }
@Override public Object getValue(final int columnIndex, final Class<?> type) throws SQLException { if (boolean.class == type) { return resultSet.getBoolean(columnIndex); } if (byte.class == type) { return resultSet.getByte(columnIndex); } if (short.class == type) { return resultSet.getShort(columnIndex); } if (int.class == type) { return resultSet.getInt(columnIndex); } if (long.class == type) { return resultSet.getLong(columnIndex); } if (float.class == type) { return resultSet.getFloat(columnIndex); } if (double.class == type) { return resultSet.getDouble(columnIndex); } if (String.class == type) { return resultSet.getString(columnIndex); } if (BigDecimal.class == type) { return resultSet.getBigDecimal(columnIndex); } if (byte[].class == type) { return resultSet.getBytes(columnIndex); } if (Date.class == type) { return resultSet.getDate(columnIndex); } if (Time.class == type) { return resultSet.getTime(columnIndex); } if (Timestamp.class == type) { return resultSet.getTimestamp(columnIndex); } if (Blob.class == type) { return resultSet.getBlob(columnIndex); } if (Clob.class == type) { return resultSet.getClob(columnIndex); } if (Array.class == type) { return resultSet.getArray(columnIndex); } return resultSet.getObject(columnIndex); }
@Test void assertGetValueByLong() throws SQLException { ResultSet resultSet = mock(ResultSet.class); when(resultSet.getLong(1)).thenReturn(1L); assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, long.class), is(1L)); }
@Override public GenericRow apply(final GenericRow left, final GenericRow right) { final GenericRow row = new GenericRow(leftCount + rightCount + additionalCount); if (left != null) { row.appendAll(left.values()); } else { fillWithNulls(row, leftCount); } if (right != null) { row.appendAll(right.values()); } else { fillWithNulls(row, rightCount); } // Potentially append additional nulls as a holder for a synthetic key columns. // These columns are not populated, as they are not accessed, but must be present for the row // to match the rows schema. fillWithNulls(row, additionalCount); return row; }
@Test public void shouldJoinValueRightEmpty() { final KsqlValueJoiner joiner = new KsqlValueJoiner(leftSchema.value().size(), rightSchema.value().size(), 0 ); final GenericRow joined = joiner.apply(leftRow, null); final List<Object> expected = Arrays.asList(12L, "foobar", null, null); assertEquals(expected, joined.values()); }
@Override public void foreach(final ForeachAction<? super K, ? super V> action) { foreach(action, NamedInternal.empty()); }
@Test public void shouldNotAllowNullActionOnForEachWithName() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.foreach(null, Named.as("foreach"))); assertThat(exception.getMessage(), equalTo("action can't be null")); }
public static void getSemanticPropsSingleFromString( SingleInputSemanticProperties result, String[] forwarded, String[] nonForwarded, String[] readSet, TypeInformation<?> inType, TypeInformation<?> outType) { getSemanticPropsSingleFromString( result, forwarded, nonForwarded, readSet, inType, outType, false); }
@Test void testForwardedForwardMultiFields() { String[] forwardedFields = {"f1->f0,f1"}; SingleInputSemanticProperties sp = new SingleInputSemanticProperties(); assertThatThrownBy( () -> SemanticPropUtil.getSemanticPropsSingleFromString( sp, forwardedFields, null, null, threeIntTupleType, threeIntTupleType)) .isInstanceOf(InvalidSemanticAnnotationException.class); }
@Override public String toString() { StringBuilder b = new StringBuilder(); if (StringUtils.isNotBlank(protocol)) { b.append(protocol); b.append("://"); } if (StringUtils.isNotBlank(host)) { b.append(host); } if (!isPortDefault() && port != -1) { b.append(':'); b.append(port); } if (StringUtils.isNotBlank(path)) { // If no scheme/host/port, leave the path as is if (b.length() > 0 && !path.startsWith("/")) { b.append('/'); } b.append(encodePath(path)); } if (queryString != null && !queryString.isEmpty()) { b.append(queryString.toString()); } if (fragment != null) { b.append("#"); b.append(encodePath(fragment)); } return b.toString(); }
@Test public void testKeepProtocolUpperCase() { s = "HTTP://www.example.com"; t = "HTTP://www.example.com"; assertEquals(t, new HttpURL(s).toString()); }
public static DisplayData from(HasDisplayData component) { checkNotNull(component, "component argument cannot be null"); InternalBuilder builder = new InternalBuilder(); builder.include(Path.root(), component); return builder.build(); }
@Test public void testIncludeEmptyPath() { thrown.expectCause(isA(IllegalArgumentException.class)); DisplayData.from( new HasDisplayData() { @Override public void populateDisplayData(DisplayData.Builder builder) { builder.include("", new NoopDisplayData()); } }); }
@VisibleForTesting Schema convertSchema(org.apache.avro.Schema schema) { return Schema.of(getFields(schema)); }
@Test void convertSchema_primitiveFields() { Assertions.assertEquals(PRIMITIVE_TYPES_BQ_SCHEMA, SCHEMA_RESOLVER.convertSchema(PRIMITIVE_TYPES)); }
@Override public String getFileNameNoSuffix() { return fileNameNoSuffix; }
@Test void getFileNameNoSuffix() { PMMLRuntimeContextImpl retrieved = new PMMLRuntimeContextImpl(new PMMLRequestData(), fileName, memoryCompilerClassLoader); assertThat(retrieved.getFileNameNoSuffix()).isEqualTo(fileNameNoSuffix); }
public CompletableFuture<Void> deleteBackups(final UUID accountUuid) { final ExternalServiceCredentials credentials = secureValueRecoveryCredentialsGenerator.generateForUuid(accountUuid); final HttpRequest request = HttpRequest.newBuilder() .uri(deleteUri) .DELETE() .header(HttpHeaders.AUTHORIZATION, basicAuthHeader(credentials)) .build(); return httpClient.sendAsync(request, HttpResponse.BodyHandlers.ofString()).thenApply(response -> { if (HttpUtils.isSuccessfulResponse(response.statusCode())) { return null; } throw new SecureValueRecoveryException("Failed to delete backup: " + response.statusCode()); }); }
@Test void deleteStoredDataFailure() { final String username = RandomStringUtils.randomAlphabetic(16); final String password = RandomStringUtils.randomAlphanumeric(32); when(credentialsGenerator.generateForUuid(accountUuid)).thenReturn( new ExternalServiceCredentials(username, password)); wireMock.stubFor(delete(urlEqualTo(SecureValueRecovery2Client.DELETE_PATH)) .withBasicAuth(username, password) .willReturn(aResponse().withStatus(400))); final CompletionException completionException = assertThrows(CompletionException.class, () -> secureValueRecovery2Client.deleteBackups(accountUuid).join()); assertTrue(completionException.getCause() instanceof SecureValueRecoveryException); }
protected KiePMMLDroolsAST getKiePMMLDroolsASTCommon(final List<Field<?>> fields, final T model, final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap) { List<KiePMMLDroolsType> types = fieldTypeMap.values() .stream().map(kiePMMLOriginalTypeGeneratedType -> { String type = DATA_TYPE.byName(kiePMMLOriginalTypeGeneratedType.getOriginalType()).getMappedClass().getSimpleName(); return new KiePMMLDroolsType(kiePMMLOriginalTypeGeneratedType.getGeneratedType(), type); }) .collect(Collectors.toList()); types.addAll(KiePMMLDataDictionaryASTFactory.factory(fieldTypeMap).declareTypes(fields)); return getKiePMMLDroolsAST(fields, model, fieldTypeMap, types); }
@Test void getKiePMMLDroolsASTCommon() { final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap = new HashMap<>(); final List<Field<?>> fields = getFieldsFromDataDictionaryAndTransformationDictionaryAndLocalTransformations(pmml.getDataDictionary(), pmml.getTransformationDictionary(), scorecard.getLocalTransformations()); KiePMMLDroolsAST retrieved = droolsModelProvider.getKiePMMLDroolsASTCommon(fields, scorecard, fieldTypeMap); commonVerifyKiePMMLDroolsAST(retrieved, fieldTypeMap); commonVerifyFieldTypeMap(fieldTypeMap, pmml.getDataDictionary().getDataFields(), pmml.getTransformationDictionary().getDerivedFields(), scorecard.getLocalTransformations().getDerivedFields()); }
@Override public void init(Configuration config) throws HoodieException { try { this.fs = FileSystem.get(config); } catch (IOException e) { throw new HoodieException("KafkaConnectHdfsProvider initialization failed"); } }
@Test public void testMissingPartition() throws Exception { Path topicPath = tempDir.resolve("topic2"); Files.createDirectories(topicPath); // create regular kafka connect hdfs dirs new File(topicPath + "/year=2016/month=05/day=01/").mkdirs(); new File(topicPath + "/year=2016/month=05/day=02/").mkdirs(); // base files with missing partition new File(topicPath + "/year=2016/month=05/day=01/" + "topic1+0+100+200" + BASE_FILE_EXTENSION).createNewFile(); new File(topicPath + "/year=2016/month=05/day=01/" + "topic1+2+100+200" + BASE_FILE_EXTENSION).createNewFile(); new File(topicPath + "/year=2016/month=05/day=02/" + "topic1+0+201+300" + BASE_FILE_EXTENSION).createNewFile(); final TypedProperties props = new TypedProperties(); props.put("hoodie.streamer.checkpoint.provider.path", topicPath.toString()); final InitialCheckPointProvider provider = new KafkaConnectHdfsProvider(props); provider.init(HoodieTestUtils.getDefaultStorageConf().unwrap()); assertThrows(HoodieException.class, provider::getCheckpoint); }
public Collection<Double> currentConsumptionRateByQuery() { return collectorMap.values() .stream() .filter(collector -> collector.getGroupId() != null) .collect( Collectors.groupingBy( MetricCollector::getGroupId, Collectors.summingDouble( m -> m.aggregateStat(ConsumerCollector.CONSUMER_MESSAGES_PER_SEC, false) ) ) ) .values(); }
@Test public void shouldAggregateConsumptionStatsByQuery() { final MetricCollectors metricCollectors = new MetricCollectors(); final ConsumerCollector collector1 = new ConsumerCollector(); collector1.configure( ImmutableMap.of( ConsumerConfig.GROUP_ID_CONFIG, "group1", KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors ) ); final ConsumerCollector collector2 = new ConsumerCollector(); collector2.configure( ImmutableMap.of( ConsumerConfig.GROUP_ID_CONFIG, "group1", KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors ) ); final ConsumerCollector collector3 = new ConsumerCollector(); collector3.configure( ImmutableMap.of( ConsumerConfig.GROUP_ID_CONFIG, "group2", KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors ) ); final Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = new HashMap<>(); final List<ConsumerRecord<Object, Object>> recordList = new ArrayList<>(); for (int i = 0; i < 500; i++) { recordList.add( new ConsumerRecord<>( TEST_TOPIC, 1, 1, 1L, TimestampType .CREATE_TIME, 10, 10, "key", "1234567890", new RecordHeaders(), Optional.empty() ) ); } records.put(new TopicPartition(TEST_TOPIC, 1), recordList); final ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records); collector1.onConsume(consumerRecords); collector2.onConsume(consumerRecords); collector3.onConsume(consumerRecords); final List<Double> consumptionByQuery = new ArrayList<>( metricCollectors.currentConsumptionRateByQuery()); consumptionByQuery.sort(Comparator.naturalOrder()); // Each query will have a unique consumer group id. In this case we have two queries and 3 // consumers. So we should expect two results from the currentConsumptionRateByQuery call. assertEquals(2, consumptionByQuery.size()); // Same as the above test, the kafka `Rate` measurable stat reports the rate as a tenth // of what it should be because all the samples haven't been filled out yet. 
assertEquals(5.0, Math.floor(consumptionByQuery.get(0)), 0.1); assertEquals(10.0, Math.floor(consumptionByQuery.get(1)), 0.1); }
public Schema mergeTables( Map<FeatureOption, MergingStrategy> mergingStrategies, Schema sourceSchema, List<SqlNode> derivedColumns, List<SqlWatermark> derivedWatermarkSpecs, SqlTableConstraint derivedPrimaryKey) { SchemaBuilder schemaBuilder = new SchemaBuilder( mergingStrategies, sourceSchema, (FlinkTypeFactory) validator.getTypeFactory(), dataTypeFactory, validator, escapeExpression); schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns); schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs); schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey); return schemaBuilder.build(); }
@Test void mergeWithIncludeFailsOnDuplicateRegularColumnAndMetadataColumn() { Schema sourceSchema = Schema.newBuilder().column("one", DataTypes.INT()).build(); List<SqlNode> derivedColumns = Arrays.asList( metadataColumn("two", DataTypes.INT(), true), computedColumn("three", plus("two", "3")), regularColumn("two", DataTypes.INT()), regularColumn("four", DataTypes.STRING())); assertThatThrownBy( () -> util.mergeTables( getDefaultMergingStrategies(), sourceSchema, derivedColumns, Collections.emptyList(), null)) .isInstanceOf(ValidationException.class) .hasMessage( "A column named 'two' already exists in the table. Duplicate columns " + "exist in the metadata column and regular column. "); }
public void command(String primaryCommand, SecureConfig config, String... allArguments) { terminal.writeLine(""); final Optional<CommandLine> commandParseResult; try { commandParseResult = Command.parse(primaryCommand, allArguments); } catch (InvalidCommandException e) { terminal.writeLine(String.format("ERROR: %s", e.getMessage())); return; } if (commandParseResult.isEmpty()) { printHelp(); return; } final CommandLine commandLine = commandParseResult.get(); switch (commandLine.getCommand()) { case CREATE: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("Creates a new keystore. For example: 'bin/logstash-keystore create'"); return; } if (secretStoreFactory.exists(config.clone())) { terminal.write("An Logstash keystore already exists. Overwrite ? [y/N] "); if (isYes(terminal.readLine())) { create(config); } } else { create(config); } break; } case LIST: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("List all secret identifiers from the keystore. For example: " + "`bin/logstash-keystore list`. Note - only the identifiers will be listed, not the secrets."); return; } Collection<SecretIdentifier> ids = secretStoreFactory.load(config).list(); List<String> keys = ids.stream().filter(id -> !id.equals(LOGSTASH_MARKER)).map(id -> id.getKey()).collect(Collectors.toList()); Collections.sort(keys); keys.forEach(terminal::writeLine); break; } case ADD: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("Add secrets to the keystore. For example: " + "`bin/logstash-keystore add my-secret`, at the prompt enter your secret. You will use the identifier ${my-secret} in your Logstash configuration."); return; } if (commandLine.getArguments().isEmpty()) { terminal.writeLine("ERROR: You must supply an identifier to add. (e.g. 
bin/logstash-keystore add my-secret)"); return; } if (secretStoreFactory.exists(config.clone())) { final SecretStore secretStore = secretStoreFactory.load(config); for (String argument : commandLine.getArguments()) { final SecretIdentifier id = new SecretIdentifier(argument); final byte[] existingValue = secretStore.retrieveSecret(id); if (existingValue != null) { SecretStoreUtil.clearBytes(existingValue); terminal.write(String.format("%s already exists. Overwrite ? [y/N] ", argument)); if (!isYes(terminal.readLine())) { continue; } } final String enterValueMessage = String.format("Enter value for %s: ", argument); char[] secret = null; while(secret == null) { terminal.write(enterValueMessage); final char[] readSecret = terminal.readSecret(); if (readSecret == null || readSecret.length == 0) { terminal.writeLine("ERROR: Value cannot be empty"); continue; } if (!ASCII_ENCODER.canEncode(CharBuffer.wrap(readSecret))) { terminal.writeLine("ERROR: Value must contain only ASCII characters"); continue; } secret = readSecret; } add(secretStore, id, SecretStoreUtil.asciiCharToBytes(secret)); } } else { terminal.writeLine("ERROR: Logstash keystore not found. Use 'create' command to create one."); } break; } case REMOVE: { if (commandLine.hasOption(CommandOptions.HELP)){ terminal.writeLine("Remove secrets from the keystore. For example: " + "`bin/logstash-keystore remove my-secret`"); return; } if (commandLine.getArguments().isEmpty()) { terminal.writeLine("ERROR: You must supply a value to remove. (e.g. 
bin/logstash-keystore remove my-secret)"); return; } final SecretStore secretStore = secretStoreFactory.load(config); for (String argument : commandLine.getArguments()) { SecretIdentifier id = new SecretIdentifier(argument); if (secretStore.containsSecret(id)) { secretStore.purgeSecret(id); terminal.writeLine(String.format("Removed '%s' from the Logstash keystore.", id.getKey())); } else { terminal.writeLine(String.format("ERROR: '%s' does not exist in the Logstash keystore.", argument)); } } break; } } }
@Test public void testHelpList() { cli.command("list", null, "--help"); assertThat(terminal.out).containsIgnoringCase("List all secret identifiers from the keystore"); }
@Override protected void descendingSort(File[] matchingFileArray, Instant instant) { String regexForIndexExtreaction = createStemRegex(instant); final Pattern pattern = Pattern.compile(regexForIndexExtreaction); Arrays.sort(matchingFileArray, new Comparator<File>() { @Override public int compare(final File f1, final File f2) { int index1 = extractIndex(pattern, f1); int index2 = extractIndex(pattern, f2); if (index1 == index2) return 0; // descending sort, i.e. newest files first if (index2 < index1) return -1; else return 1; } private int extractIndex(Pattern pattern, File f1) { Matcher matcher = pattern.matcher(f1.getName()); if (matcher.find()) { String indexAsStr = matcher.group(1); if (indexAsStr == null || indexAsStr.isEmpty()) return NO_INDEX; // unreachable code? else return Integer.parseInt(indexAsStr); } else return NO_INDEX; } }); }
@Test public void badFilenames() { FileNamePattern fileNamePattern = new FileNamePattern("smoke-%d-%i.gz", context); SizeAndTimeBasedArchiveRemover remover = new SizeAndTimeBasedArchiveRemover(fileNamePattern, null); File[] fileArray = new File[2]; File[] expected = new File[2]; fileArray[0] = expected[0] = new File("/tmp/smoke-1970-01-01-b.gz"); fileArray[1] = expected[1] = new File("/tmp/smoke-1970-01-01-c.gz"); remover.descendingSort(fileArray, Instant.ofEpochMilli(0)); assertArrayEquals(expected, fileArray); }
public Optional<File> getFile(FileReferenceDownload fileReferenceDownload) { try { return getFutureFile(fileReferenceDownload).get(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { fileReferenceDownloader.failedDownloading(fileReferenceDownload.fileReference()); return Optional.empty(); } }
@Test public void getFile() throws IOException { File downloadDir = fileDownloader.downloadDirectory(); { // fileReference already exists on disk, does not have to be downloaded String fileReferenceString = "foo"; String filename = "foo.jar"; FileReference fileReference = new FileReference(fileReferenceString); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); writeFileReference(downloadDir, fileReferenceString, filename); fileDownloader.downloads().completedDownloading(fileReference, fileReferenceFullPath); // Check that we get correct path and content when asking for file reference Optional<File> pathToFile = getFile(fileReference); assertTrue(pathToFile.isPresent()); String downloadedFile = new File(fileReferenceFullPath, filename).getAbsolutePath(); assertEquals(new File(fileReferenceFullPath, filename).getAbsolutePath(), downloadedFile); assertEquals("content", IOUtils.readFile(pathToFile.get())); // Verify download status when downloaded assertDownloadStatus(fileReference, 1.0); } { // fileReference does not exist on disk, needs to be downloaded, but fails when asking upstream for file) connection.setResponseHandler(new MockConnection.UnknownFileReferenceResponseHandler()); FileReference fileReference = new FileReference("bar"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Verify download status when unable to download assertDownloadStatus(fileReference, 0.0); } { // fileReference does not exist on disk, needs to be downloaded) FileReference fileReference = new FileReference("baz"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Verify download status assertDownloadStatus(fileReference, 0.0); // Receives fileReference, should return and make it available to caller String filename 
= "abc.jar"; receiveFile(fileReference, filename, FileReferenceData.Type.file, "some other content"); Optional<File> downloadedFile = getFile(fileReference); assertTrue(downloadedFile.isPresent()); File downloadedFileFullPath = new File(fileReferenceFullPath, filename); assertEquals(downloadedFileFullPath.getAbsolutePath(), downloadedFile.get().getAbsolutePath()); assertEquals("some other content", IOUtils.readFile(downloadedFile.get())); // Verify download status when downloaded System.out.println(fileDownloader.downloads().downloadStatuses()); assertDownloadStatus(fileReference, 1.0); } { // fileReference does not exist on disk, needs to be downloaded, is compressed data FileReference fileReference = new FileReference("fileReferenceToDirWithManyFiles"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Verify download status assertDownloadStatus(fileReference, 0.0); // Receives fileReference, should return and make it available to caller String filename = "abc.tar.gz"; Path tempPath = Files.createTempDirectory("dir"); File subdir = new File(tempPath.toFile(), "subdir"); File fooFile = new File(subdir, "foo"); IOUtils.writeFile(fooFile, "foo", false); // Check that long file names work. 
(need to do TarArchiveOutPutStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX)) for it to work); File barFile = new File(subdir, "really-long-filename-over-100-bytes-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); IOUtils.writeFile(barFile, "bar", false); File tarFile = new FileReferenceCompressor(compressed, gzip).compress(tempPath.toFile(), List.of(fooFile, barFile), new File(tempPath.toFile(), filename)); byte[] tarredContent = IOUtils.readFileBytes(tarFile); receiveFile(fileReference, filename, compressed, tarredContent); Optional<File> downloadedFile = getFile(fileReference); assertTrue(downloadedFile.isPresent()); File downloadedFoo = new File(fileReferenceFullPath, tempPath.relativize(fooFile.toPath()).toString()); File downloadedBar = new File(fileReferenceFullPath, tempPath.relativize(barFile.toPath()).toString()); assertEquals("foo", IOUtils.readFile(downloadedFoo)); assertEquals("bar", IOUtils.readFile(downloadedBar)); // Verify download status when downloaded assertDownloadStatus(fileReference, 1.0); } }
@Override public List<RoleDO> getRoleList() { return roleMapper.selectList(); }
@Test public void testGetRoleList_ids() { // mock 数据 RoleDO dbRole01 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())); roleMapper.insert(dbRole01); RoleDO dbRole02 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())); roleMapper.insert(dbRole02); // 准备参数 Collection<Long> ids = singleton(dbRole01.getId()); // 调用 List<RoleDO> list = roleService.getRoleList(ids); // 断言 assertEquals(1, list.size()); assertPojoEquals(dbRole01, list.get(0)); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } }
@Test public void testToBytesIgnoresSchema() { assertArrayEquals(Utils.utf8("true"), converter.fromConnectData(TOPIC, null, true)); }
@Override public void updateFileConfig(FileConfigSaveReqVO updateReqVO) { // 校验存在 FileConfigDO config = validateFileConfigExists(updateReqVO.getId()); // 更新 FileConfigDO updateObj = FileConfigConvert.INSTANCE.convert(updateReqVO) .setConfig(parseClientConfig(config.getStorage(), updateReqVO.getConfig())); fileConfigMapper.updateById(updateObj); // 清空缓存 clearCache(config.getId(), null); }
@Test public void testUpdateFileConfig_notExists() { // 准备参数 FileConfigSaveReqVO reqVO = randomPojo(FileConfigSaveReqVO.class); // 调用, 并断言异常 assertServiceException(() -> fileConfigService.updateFileConfig(reqVO), FILE_CONFIG_NOT_EXISTS); }
public static int[] sort(int[] array) { int[] order = new int[array.length]; for (int i = 0; i < order.length; i++) { order[i] = i; } sort(array, order); return order; }
@Test public void testSortObject() { System.out.println("sort object"); Integer[] data1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; int[] order1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; assertArrayEquals(order1, QuickSort.sort(data1)); Integer[] data2 = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; int[] order2 = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; assertArrayEquals(order2, QuickSort.sort(data2)); Integer[] data3 = {0, 1, 2, 3, 5, 4, 6, 7, 8, 9}; int[] order3 = {0, 1, 2, 3, 5, 4, 6, 7, 8, 9}; assertArrayEquals(order3, QuickSort.sort(data3)); Integer[] data4 = {4, 1, 2, 3, 0, 5, 6, 7, 8, 9}; int[] order4 = {4, 1, 2, 3, 0, 5, 6, 7, 8, 9}; assertArrayEquals(order4, QuickSort.sort(data4)); }
public static <T> void forEachWithIndex(Iterable<T> iterable, ObjectIntProcedure<? super T> procedure) { FJIterate.forEachWithIndex(iterable, procedure, FJIterate.FORK_JOIN_POOL); }
@Test public void testForEachWithIndexToArrayUsingFastListSerialPath() { Integer[] array = new Integer[200]; MutableList<Integer> list = new FastList<>(Interval.oneTo(200)); assertTrue(ArrayIterate.allSatisfy(array, Predicates.isNull())); FJIterate.forEachWithIndex(list, (each, index) -> array[index] = each); assertArrayEquals(array, list.toArray(new Integer[]{})); }
public boolean execute(final Runnable job) { // If there are no available slots, we won't do anything final boolean acquired = slots.tryAcquire(); if (!acquired) { return false; } try { executor.execute(() -> { try { job.run(); } catch (Exception e) { LOG.error("Unhandled job execution error", e); } finally { slots.release(); } }); return true; } catch (RejectedExecutionException e) { // This should not happen because we always check the semaphore before submitting jobs to the pool slots.release(); return false; } }
@Test public void testExecute() throws Exception { final JobWorkerPool pool = new JobWorkerPool("test", 2, new MetricRegistry()); final CountDownLatch latch1 = new CountDownLatch(1); final CountDownLatch latch2 = new CountDownLatch(1); final CountDownLatch task1Latch = new CountDownLatch(1); final CountDownLatch task2Latch = new CountDownLatch(1); // Before we do anything, the number of free slots should be the same as the pool size assertThat(pool.freeSlots()).isEqualTo(2); assertThat(pool.anySlotsUsed()).isFalse(); // Execute the first task assertThat(pool.execute(() -> { var ignored = Uninterruptibles.awaitUninterruptibly(task1Latch, 60, TimeUnit.SECONDS); latch1.countDown(); })).isTrue(); // The number of free slots should be reduced by one assertThat(pool.freeSlots()).isEqualTo(1); assertThat(pool.anySlotsUsed()).isTrue(); // Execute the second task assertThat(pool.execute(() -> { var ignored = Uninterruptibles.awaitUninterruptibly(task2Latch, 60, TimeUnit.SECONDS); latch2.countDown(); })).isTrue(); // The number of free slots should be reduced by one assertThat(pool.freeSlots()).isEqualTo(0); // Since there are no slots left, the tasks shouldn't be executed assertThat(pool.execute(() -> {})).isFalse(); assertThat(pool.freeSlots()).isEqualTo(0); // Wait for the first task to finish task1Latch.countDown(); assertThat(latch1.await(60, TimeUnit.SECONDS)).isTrue(); // Wait for the second task to finish task2Latch.countDown(); assertThat(latch2.await(60, TimeUnit.SECONDS)).isTrue(); pool.shutdown(Duration.ofSeconds(30)); assertThat(pool.anySlotsUsed()).isFalse(); }
@Override public boolean isAdded(Component component) { checkComponent(component); if (analysisMetadataHolder.isFirstAnalysis()) { return true; } return addedComponents.contains(component); }
@Test public void isAdded_fails_with_NPE_if_component_is_null() { assertThatThrownBy(() -> underTest.isAdded(null)) .isInstanceOf(NullPointerException.class) .hasMessage("component can't be null"); }
static void removeChannelIfDisconnected(Channel ch) { if (ch != null && !ch.isConnected()) { ch.removeAttribute(CHANNEL_KEY); } }
@Test void removeChannelIfDisconnectedTest() { Assertions.assertNull(channel.getAttribute(CHANNEL_KEY)); channel.setAttribute(CHANNEL_KEY, header); channel.close(); HeaderExchangeChannel.removeChannelIfDisconnected(channel); Assertions.assertNull(channel.getAttribute(CHANNEL_KEY)); }
@Override public List<T> pollN(int n) { if (n >= size) { // if we need to remove all elements then do fast polling return pollAll(); } List<T> retList = new ArrayList<T>(n); while (n-- > 0 && head != null) { T curr = head.element; this.removeElem(curr); retList.add(curr); } shrinkIfNecessary(); return retList; }
@Test public void testPollNOne() { LOG.info("Test pollN one"); set.add(list.get(0)); List<Integer> l = set.pollN(10); assertEquals(1, l.size()); assertEquals(list.get(0), l.get(0)); LOG.info("Test pollN one - DONE"); }
public Field fieldFrom(Schema schema) { if (schema == null) return null; Schema current = schema; for (String pathSegment : stepsWithoutLast()) { final Field field = current.field(pathSegment); if (field != null) { current = field.schema(); } else { return null; } } return current.field(lastStep()); }
@Test void shouldReturnNullFieldWhenFieldNotFound() { Schema barSchema = SchemaBuilder.struct().field("bar", Schema.INT32_SCHEMA).build(); Schema schema = SchemaBuilder.struct().field("foo", barSchema).build(); assertNull(pathV2("un.known").fieldFrom(schema)); assertNull(pathV2("foo.unknown").fieldFrom(schema)); assertNull(pathV2("unknown").fieldFrom(schema)); assertNull(pathV2("test").fieldFrom(null)); }
@Override public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options) { final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size()); final CreatableTopicCollection topics = new CreatableTopicCollection(); for (NewTopic newTopic : newTopics) { if (topicNameIsUnrepresentable(newTopic.name())) { KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidTopicException("The given topic name '" + newTopic.name() + "' cannot be represented in a request.")); topicFutures.put(newTopic.name(), future); } else if (!topicFutures.containsKey(newTopic.name())) { topicFutures.put(newTopic.name(), new KafkaFutureImpl<>()); topics.add(newTopic.convertToCreatableTopic()); } } if (!topics.isEmpty()) { final long now = time.milliseconds(); final long deadline = calcDeadlineMs(now, options.timeoutMs()); final Call call = getCreateTopicsCall(options, topicFutures, topics, Collections.emptyMap(), now, deadline); runnable.call(call, now); } return new CreateTopicsResult(new HashMap<>(topicFutures)); }
// Verifies that when the broker response covers only one of two requested topics, the covered topic's future succeeds while the missing topic's future fails with ApiException.
@Test public void testCreateTopicsPartialResponse() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse( expectCreateTopicsRequestWithTopics("myTopic", "myTopic2"), prepareCreateTopicsResponse("myTopic", Errors.NONE)); CreateTopicsResult topicsResult = env.adminClient().createTopics( asList(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2))), new NewTopic("myTopic2", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)); topicsResult.values().get("myTopic").get(); TestUtils.assertFutureThrows(topicsResult.values().get("myTopic2"), ApiException.class); } }
/** Returns the posting-list entry stored for {@code key}, or null when the key is absent. */
public Entry getPostingList(long key) { return dictionary.get(key); }
// Verifies builder inserts: a single posting is retrievable, and two postings under the same key keep both doc ids and data refs in insertion order.
@Test void requireThatValuesCanBeInserted() { SimpleIndex.Builder builder = new SimpleIndex.Builder(); builder.insert(KEY, new Posting(DOC_ID, 10)); SimpleIndex index = builder.build(); SimpleIndex.Entry e = index.getPostingList(KEY); assertNotNull(e); assertEquals(1, e.docIds.length); builder = new SimpleIndex.Builder(); builder.insert(KEY, new Posting(DOC_ID, 10)); builder.insert(KEY, new Posting(DOC_ID + 1, 20)); index = builder.build(); e = index.getPostingList(KEY); assertEquals(2, e.docIds.length); assertEquals(10, e.dataRefs[0]); assertEquals(20, e.dataRefs[1]); }
/**
 * Resolves the server's temp directory from configuration.
 * Uses the "sonar.path.temp" property when present, otherwise falls back to the
 * relative directory "temp".
 */
static File resolveTempDir(Properties p) {
    String configured = p.getProperty("sonar.path.temp");
    return new File(configured == null ? "temp" : configured);
}
// Verifies resolveTempDir falls back to the relative "temp" directory when "sonar.path.temp" is not set.
@Test public void resolveTempDir_defaults_to_temp() { File file = Shutdowner.resolveTempDir(new Properties()); assertThat(file).isEqualTo(new File("temp")); }
/** Identity UDF: returns the key column value unchanged (used to copy a key column into the value). */
@Udf public <T> T asValue(final T keyColumn) { return keyColumn; }
// Verifies asValue passes null through for each boxed primitive and String argument.
@Test public void shouldHandleNullPrimitiveTypes() { assertThat(udf.asValue((Boolean)null), is(nullValue())); assertThat(udf.asValue((Integer) null), is(nullValue())); assertThat(udf.asValue((Long) null), is(nullValue())); assertThat(udf.asValue((Double) null), is(nullValue())); assertThat(udf.asValue((String) null), is(nullValue())); }
/**
 * Returns true when the given user (directly or via one of the given roles) can edit at least
 * one template in this collection; false when none are editable.
 */
public boolean canViewAndEditTemplate(CaseInsensitiveString username, List<Role> roles) { for (PipelineTemplateConfig templateConfig : this) { if (canUserEditTemplate(templateConfig, username, roles)) { return true; } } return false; }
// Verifies a user who administers no template cannot view/edit, even when other users administer templates in the collection.
@Test public void shouldReturnFalseIfUserCannotViewAndEditAtLeastOneTemplate() throws Exception { CaseInsensitiveString templateAdmin = new CaseInsensitiveString("template-admin"); CaseInsensitiveString nonTemplateAdmin = new CaseInsensitiveString("some-random-user"); TemplatesConfig templates = configForUserWhoCanViewATemplate(); templates.add(PipelineTemplateConfigMother.createTemplate("template200", new Authorization(new AdminsConfig(new AdminUser(templateAdmin))), StageConfigMother.manualStage("stage-name"))); assertThat(templates.canViewAndEditTemplate(nonTemplateAdmin, null), is(false)); }
/**
 * Submits an execution request to the task plugin: the handler matching the resolved extension
 * version serializes the config/context into the request body and deserializes the plugin's
 * response into an ExecutionResult.
 */
@Override public ExecutionResult execute(final TaskConfig config, final TaskExecutionContext taskExecutionContext) { return pluginRequestHelper.submitRequest(pluginId, TaskExtension.EXECUTION_REQUEST, new DefaultPluginInteractionCallback<>() { @Override public String requestBody(String resolvedExtensionVersion) { return handlerMap.get(resolvedExtensionVersion).getTaskExecutionBody(config, taskExecutionContext); } @Override public ExecutionResult onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) { return handlerMap.get(resolvedExtensionVersion).toExecutionResult(responseBody); } }); }
// Verifies execute submits an EXECUTION_REQUEST with the expected extension name/version and maps a successful plugin response to a successful ExecutionResult with the plugin's message.
@Test public void shouldExecuteAndReturnSuccessfulExecutionResultTaskThroughPlugin() { when(pluginManager.submitTo(eq(pluginId), eq(PLUGGABLE_TASK_EXTENSION), any(GoPluginApiRequest.class))).thenReturn(response); when(handler.toExecutionResult(response.responseBody())).thenReturn(ExecutionResult.success("message1")); ExecutionResult result = new JsonBasedTaskExecutor(pluginId, pluginRequestHelper, handlerHashMap).execute(config(), context); assertThat(result.isSuccessful(), is(true)); assertThat(result.getMessagesForDisplay(), is("message1")); ArgumentCaptor<GoPluginApiRequest> argument = ArgumentCaptor.forClass(GoPluginApiRequest.class); verify(pluginManager).submitTo(eq(pluginId), eq(PLUGGABLE_TASK_EXTENSION), argument.capture()); assertThat(argument.getValue().extension(), is(PLUGGABLE_TASK_EXTENSION)); assertThat(argument.getValue().extensionVersion(), is(extensionVersion)); assertThat(argument.getValue().requestName(), is(TaskExtension.EXECUTION_REQUEST)); }
/** Entry point for the Parse transform; configure the returned instance via its builder-style methods (e.g. filepattern). */
public static Parse parse() { return new AutoValue_TikaIO_Parse.Builder().build(); }
// Verifies parsing a damaged PDF yields an unsuccessful ParseResult carrying the file location and a TikaException as the error.
@Test public void testParseDamagedPdfFile() throws IOException { String path = getClass().getResource("/damaged.pdf").getPath(); PCollection<ParseResult> res = p.apply("ParseInvalidPdfFile", TikaIO.parse().filepattern(path)); PAssert.thatSingleton(res) .satisfies( input -> { assertEquals(path, input.getFileLocation()); assertFalse(input.isSuccess()); assertTrue(input.getError() instanceof TikaException); return null; }); p.run(); }
/**
 * Builds an AvroSchema from the given definition. When both a reader and writer are supplied
 * they are used directly; otherwise the schema info is parsed and the explicitly configured
 * class loader (or, failing that, the POJO's class loader) is used for decoding.
 */
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) { if (schemaDefinition.getSchemaReaderOpt().isPresent() && schemaDefinition.getSchemaWriterOpt().isPresent()) { return new AvroSchema<>(schemaDefinition.getSchemaReaderOpt().get(), schemaDefinition.getSchemaWriterOpt().get(), parseSchemaInfo(schemaDefinition, SchemaType.AVRO)); } ClassLoader pojoClassLoader = null; if (schemaDefinition.getClassLoader() != null) { pojoClassLoader = schemaDefinition.getClassLoader(); } else if (schemaDefinition.getPojo() != null) { pojoClassLoader = schemaDefinition.getPojo().getClassLoader(); } return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader); }
// Verifies JSON-defined schemas with JSR310 disabled: decoding with a class loader round-trips Instant values, while decoding without one yields a different, non-Instant representation.
@Test public void testTimestampWithJsonDef(){ AvroSchema<TimestampPojo> schemaWithPojo = AvroSchema.of(SchemaDefinition.<TimestampPojo>builder() .withPojo(TimestampPojo.class) .withJSR310ConversionEnabled(false).build()); TimestampPojo timestampPojo = new TimestampPojo(Instant.parse("2022-06-10T12:38:59.039084Z")); byte[] encode = schemaWithPojo.encode(timestampPojo); TimestampPojo decodeWithPojo = schemaWithPojo.decode(encode); Assert.assertEquals(decodeWithPojo, timestampPojo); String schemaDefinition = new String(schemaWithPojo.schemaInfo.getSchema()); AvroSchema<TimestampPojo> schemaWithJsonDef = AvroSchema.of(SchemaDefinition.<TimestampPojo>builder() .withJsonDef(schemaDefinition) .withClassLoader(TimestampPojo.class.getClassLoader()) .withJSR310ConversionEnabled(false).build()); TimestampPojo decodeWithJson = schemaWithJsonDef.decode(encode); Assert.assertEquals(decodeWithJson, decodeWithPojo); Assert.assertEquals(Instant.class, decodeWithJson.getValue().getClass()); AvroSchema<TimestampPojo> schemaWithJsonDefNoClassLoader = AvroSchema.of(SchemaDefinition.<TimestampPojo>builder() .withJsonDef(schemaDefinition) .withJSR310ConversionEnabled(false).build()); TimestampPojo decodeWithJsonNoClassLoader = schemaWithJsonDefNoClassLoader.decode(encode); Assert.assertNotEquals(decodeWithJsonNoClassLoader, decodeWithPojo); Assert.assertNotEquals(Instant.class, decodeWithJsonNoClassLoader.getValue().getClass()); }
/**
 * Wires up the Jetty server: MBean event listener, plain connector, web app context with a
 * custom error page handler and gzip, deployment manager, then the server-level handler chain
 * and any subclass customization; finally enables stop-at-shutdown.
 */
@Override public void configure() throws Exception { server.addEventListener(mbeans()); server.addConnector(plainConnector()); ContextHandlerCollection handlers = new ContextHandlerCollection(); deploymentManager.setContexts(handlers); webAppContext = createWebAppContext(); JettyCustomErrorPageHandler errorHandler = new JettyCustomErrorPageHandler(); webAppContext.setErrorHandler(errorHandler); webAppContext.insertHandler(gzipHandler()); server.addBean(errorHandler); server.addBean(deploymentManager); HandlerCollection serverLevelHandlers = new HandlerCollection(); serverLevelHandlers.setHandlers(new Handler[]{handlers}); server.setHandler(serverLevelHandlers); performCustomConfiguration(); server.setStopAtShutdown(true); }
// Verifies configure registers an MBeanContainer (holding a non-null MBeanServer) as a server event listener.
@Test public void shouldAddMBeanContainerAsEventListener() throws Exception { ArgumentCaptor<MBeanContainer> captor = ArgumentCaptor.forClass(MBeanContainer.class); jettyServer.configure(); verify(server).addEventListener(captor.capture()); MBeanContainer mBeanContainer = captor.getValue(); assertThat(mBeanContainer.getMBeanServer()).isNotNull(); }
public static OffsetAndMetadata fromRequest( OffsetCommitRequestData.OffsetCommitRequestPartition partition, long currentTimeMs, OptionalLong expireTimestampMs ) { return new OffsetAndMetadata( partition.committedOffset(), ofSentinel(partition.committedLeaderEpoch()), partition.committedMetadata() == null ? OffsetAndMetadata.NO_METADATA : partition.committedMetadata(), partition.commitTimestamp() == OffsetCommitRequest.DEFAULT_TIMESTAMP ? currentTimeMs : partition.commitTimestamp(), expireTimestampMs ); }
// Verifies fromRequest maps sentinel epoch / null metadata to empty / "" defaults using the supplied time, and preserves explicit epoch and metadata values.
@Test public void testFromTransactionalRequest() { MockTime time = new MockTime(); TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition partition = new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(100L) .setCommittedLeaderEpoch(-1) .setCommittedMetadata(null); assertEquals( new OffsetAndMetadata( 100L, OptionalInt.empty(), "", time.milliseconds(), OptionalLong.empty() ), OffsetAndMetadata.fromRequest( partition, time.milliseconds() ) ); partition .setCommittedLeaderEpoch(10) .setCommittedMetadata("hello"); assertEquals( new OffsetAndMetadata( 100L, OptionalInt.of(10), "hello", time.milliseconds(), OptionalLong.empty() ), OffsetAndMetadata.fromRequest( partition, time.milliseconds() ) ); }
/** Convenience overload: authorizes a peer presenting a single certificate. */
public ConnectionAuthContext authorizePeer(X509Certificate cert) { return authorizePeer(List.of(cert)); }
// Verifies authorization succeeds when only a subset of policies match, and that only the matching policy is reported.
@Test void can_match_subset_of_policies() { PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, createRequiredCredential(CN, "*.matching.cn")), createPolicy(POLICY_2, createRequiredCredential(SAN_DNS, "*.matching.san"))); ConnectionAuthContext result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", List.of("foo.matching.san"), List.of())); assertAuthorized(result); assertThat(result.matchedPolicies()).containsOnly(POLICY_2); }
/**
 * Dispatches assignment: an empty member set yields an empty assignment; homogeneous
 * subscriptions take the optimized path, heterogeneous ones the general path.
 */
@Override public GroupAssignment assign( GroupSpec groupSpec, SubscribedTopicDescriber subscribedTopicDescriber ) throws PartitionAssignorException { if (groupSpec.memberIds().isEmpty()) { return new GroupAssignment(Collections.emptyMap()); } else if (groupSpec.subscriptionType() == SubscriptionType.HOMOGENEOUS) { return assignHomogeneousGroup(groupSpec, subscribedTopicDescriber); } else { return assignHeterogeneousGroup(groupSpec, subscribedTopicDescriber); } }
// Verifies assign throws PartitionAssignorException when a member subscribes to a topic absent from the subscribed topic metadata.
@Test public void testOneMemberSubscribedToNonExistentTopic() { SubscribedTopicDescriberImpl subscribedTopicMetadata = new SubscribedTopicDescriberImpl( Collections.singletonMap( topic1Uuid, new TopicMetadata( topic1Uuid, topic1Name, 3, Collections.emptyMap() ) ) ); Map<String, MemberSubscriptionAndAssignmentImpl> members = Collections.singletonMap( memberA, new MemberSubscriptionAndAssignmentImpl( Optional.empty(), Optional.empty(), mkSet(topic2Uuid), Assignment.EMPTY ) ); GroupSpec groupSpec = new GroupSpecImpl( members, HOMOGENEOUS, Collections.emptyMap() ); assertThrows(PartitionAssignorException.class, () -> assignor.assign(groupSpec, subscribedTopicMetadata)); }
/**
 * Confirms session data for the account: first validates the number of registered apps, then
 * the SMS code, and only when both validations pass confirms the session.
 */
@Override public AppResponse process(Flow flow, SessionDataRequest request) throws SharedServiceClientException { return validateAmountOfApps(flow, appSession.getAccountId(), request) .orElseGet(() -> validateSms(flow, appSession.getAccountId(), request.getSmscode()) .orElseGet(() -> confirmSession(flow, request))); }
// Verifies processing with an undefined flow still confirms the session: authenticator fields are populated and the response carries the expected status and user app id.
@Test void processUndefinedFlow() throws SharedServiceClientException { AppResponse appResponse = sessionConfirmed.process(mockedUndefinedFlow, mockedSessionDataRequest); AppAuthenticator appAuthenticator = sessionConfirmed.getAppAuthenticator(); assertSame(mockedAppAuthenticator, appAuthenticator); assertEquals(DEVICE_NAME, appAuthenticator.getDeviceName()); assertEquals(SESSION_DATA_REQUEST_INSTANCE_ID, appAuthenticator.getInstanceId()); assertEquals(VALID_RESPONSE_CODE, ((SessionDataResponse)appResponse).getStatus()); assertEquals(APP_AUTHENTICATOR_USER_APP_ID, ((SessionDataResponse)appResponse).getUserAppId()); }
/** Validates the CREATE readwrite-splitting rule statement against the current rule configuration (if any) before it is applied. */
@Override public void checkBeforeUpdate(final CreateReadwriteSplittingRuleStatement sqlStatement) { ReadwriteSplittingRuleStatementChecker.checkCreation(database, sqlStatement.getRules(), null == rule ? null : rule.getConfiguration(), sqlStatement.isIfNotExists()); }
// Verifies checkBeforeUpdate rejects a statement whose write data source duplicates one already used by the current rule configuration.
@Test void assertCheckSQLStatementWithDuplicateResource() { when(resourceMetaData.getStorageUnits()).thenReturn(Collections.singletonMap("write_ds", null)); ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class); when(rule.getConfiguration()).thenReturn(createCurrentRuleConfiguration()); executor.setRule(rule); assertThrows(InvalidRuleConfigurationException.class, () -> executor.checkBeforeUpdate(createSQLStatement("write_ds", "TEST"))); }
/** Returns true when no search filter IDs were hidden, i.e. every search filter is visible. */
public boolean allSearchFiltersVisible() { return hiddenSearchFiltersIDs.isEmpty(); }
// Verifies the overload taking an allowed-IDs list returns false when one of the hidden filters is not in that list.
@Test void testAllSearchFiltersVisibleReturnsFalseIfHiddenFilterIsForbidden() { toTest = new SearchFilterVisibilityCheckStatus(ImmutableList.of("Allowed hidden one", "Forbidden hidden one")); assertFalse(toTest.allSearchFiltersVisible(ImmutableList.of("Allowed hidden one", "Another allowed hidden one"))); }
/**
 * Replays a coordinator record into the appropriate in-memory state manager, dispatching on
 * the record key's version: offsets for versions 0-1, consumer-group state for 2-8, and
 * share-group state for 9-14. Unknown versions raise IllegalStateException.
 */
@Override public void replay( long offset, long producerId, short producerEpoch, CoordinatorRecord record ) throws RuntimeException { ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); switch (key.version()) { case 0: case 1: offsetMetadataManager.replay( offset, producerId, (OffsetCommitKey) key.message(), (OffsetCommitValue) Utils.messageOrNull(value) ); break; case 2: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; case 3: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; case 4: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 5: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 6: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 7: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 8: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; case 9: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 10: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 11: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; case 12: 
groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 13: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 14: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; default: throw new IllegalStateException("Received an unknown record type " + key.version() + " in " + record); } }
// Verifies replay throws IllegalStateException for a record whose key version (255) is outside the supported range.
@Test public void testReplayWithUnsupportedVersion() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(Time.SYSTEM), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); ConsumerGroupCurrentMemberAssignmentKey key = new ConsumerGroupCurrentMemberAssignmentKey(); ConsumerGroupCurrentMemberAssignmentValue value = new ConsumerGroupCurrentMemberAssignmentValue(); assertThrows(IllegalStateException.class, () -> coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( new ApiMessageAndVersion(key, (short) 255), new ApiMessageAndVersion(value, (short) 0) )) ); }
/**
 * Dispatches authorization checks by statement type (query, insert-into, create-as-select,
 * print-topic, create-source); statements of any other type are not checked here.
 */
@Override public void checkAuthorization( final KsqlSecurityContext securityContext, final MetaStore metaStore, final Statement statement ) { if (statement instanceof Query) { validateQuery(securityContext, metaStore, (Query)statement); } else if (statement instanceof InsertInto) { validateInsertInto(securityContext, metaStore, (InsertInto)statement); } else if (statement instanceof CreateAsSelect) { validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement); } else if (statement instanceof PrintTopic) { validatePrintTopic(securityContext, (PrintTopic)statement); } else if (statement instanceof CreateSource) { validateCreateSource(securityContext, (CreateSource)statement); } }
// Verifies CREATE STREAM over a readable topic passes authorization without throwing.
@Test public void shouldCreateSourceWithReadPermissionsAllowed() { // Given: final Statement statement = givenStatement(String.format( "CREATE STREAM s1 WITH (kafka_topic='%s', value_format='JSON');", KAFKA_TOPIC) ); // When/Then: authorizationValidator.checkAuthorization(securityContext, metaStore, statement); }
/** Builds the generated static linkLibraries(List&lt;LinkReference&gt;) method, which resolves library references into the contract binary. */
MethodSpec buildLinkLibraryMethod() { MethodSpec.Builder methodBuilder = MethodSpec.methodBuilder("linkLibraries") .addModifiers(Modifier.PUBLIC, Modifier.STATIC) .addParameter( ParameterizedTypeName.get( ClassName.get(List.class), ClassName.get(Contract.LinkReference.class)), "references") .addStatement( LIBRARIES_LINKED_BINARY + " = " + "linkBinaryWithReferences(" + BINARY + ", references)"); return methodBuilder.build(); }
// Verifies the generated linkLibraries method source matches the expected signature and body exactly.
@Test public void testBuildFunctionLinkBinaryWithReferences() throws Exception { MethodSpec methodSpec = solidityFunctionWrapper.buildLinkLibraryMethod(); String expected = "public static void linkLibraries(java.util.List<org.web3j.tx.Contract.LinkReference> references) {\n" + " librariesLinkedBinary = linkBinaryWithReferences(BINARY, references);\n" + "}\n"; assertEquals(methodSpec.toString(), (expected)); }
/**
 * Converts an epoch-millisecond value into a SQL {@link Timestamp}.
 * Returns null when the input is null.
 */
@Udf(description = "Converts the number of milliseconds since 1970-01-01 00:00:00 UTC/GMT into " + "a TIMESTAMP value.")
public Timestamp fromUnixTime(
    @UdfParameter( description = "Milliseconds since" + " January 1, 1970, 00:00:00 GMT.") final Long epochMilli
) {
  // Null-in/null-out, matching SQL semantics.
  return epochMilli == null ? null : new Timestamp(epochMilli);
}
// Verifies fromUnixTime returns null for a null input.
@Test public void shouldReturnNull() { // When: final Object result = udf.fromUnixTime(null); // Then: assertNull(result); }
/**
 * Receives a multi-part message shaped by a picture string (i/1/2/4/8 numeric codes, s string,
 * b/c raw bytes, f frame, m message, z empty frame) and returns the decoded parts in order.
 * Pictures that do not match the accepted format, and non-empty 'z' frames, are rejected with
 * a ZMQException carrying EPROTO.
 * NOTE(review): for '4', the mask (0xffffffff) &amp; int is a no-op, and Integer.parseInt rejects
 * values above 2^31-1; an unsigned 32-bit read (0xffffffffL &amp; Long.parseLong) may have been
 * intended — confirm against the sending side before changing, since it would alter the boxed
 * element type returned to callers.
 */
@Draft public Object[] recvPicture(Socket socket, String picture) { if (!FORMAT.matcher(picture).matches()) { throw new ZMQException(picture + " is not in expected format " + FORMAT.pattern(), ZError.EPROTO); } Object[] elements = new Object[picture.length()]; for (int index = 0; index < picture.length(); index++) { char pattern = picture.charAt(index); switch (pattern) { case 'i': { elements[index] = Integer.valueOf(socket.recvStr()); break; } case '1': { elements[index] = (0xff) & Integer.parseInt(socket.recvStr()); break; } case '2': { elements[index] = (0xffff) & Integer.parseInt(socket.recvStr()); break; } case '4': { elements[index] = (0xffffffff) & Integer.parseInt(socket.recvStr()); break; } case '8': { elements[index] = Long.valueOf(socket.recvStr()); break; } case 's': { elements[index] = socket.recvStr(); break; } case 'b': case 'c': { elements[index] = socket.recv(); break; } case 'f': { elements[index] = ZFrame.recvFrame(socket); break; } case 'm': { elements[index] = ZMsg.recvMsg(socket); break; } case 'z': { ZFrame zeroFrame = ZFrame.recvFrame(socket); if (zeroFrame == null || zeroFrame.size() > 0) { throw new ZMQException("zero frame is not empty", ZError.EPROTO); } elements[index] = new ZFrame((byte[]) null); break; } default: assert (false) : "invalid picture element '" + pattern + "'"; } } return elements; }
// Verifies recvPicture rejects a picture containing an unsupported format character by throwing ZMQException before touching the socket.
@Test(expected = ZMQException.class) public void testReceiveInvalidPictureFormat() { String picture = "x"; pic.recvPicture(null, picture); }
public static int[] getCutIndices(String s, String splitChar, int index) { int found = 0; char target = splitChar.charAt(0); for (int i = 0; i < s.length(); i++) { if (s.charAt(i) == target) { found++; } if (found == index) { int begin = i; if (begin != 0) { begin += 1; } int end = s.indexOf(target, i + 1); // End will be -1 if this is the last last token in the string and there is no other occurence. if (end == -1) { end = s.length(); } return new int[]{begin, end}; } } return new int[]{0, 0}; }
// Verifies an out-of-range token index yields the {0, 0} sentinel.
@Test public void testCutIndicesReturnsZeroesOnInvalidBoundaries() throws Exception { int[] result = SplitAndIndexExtractor.getCutIndices("<10> 07 Aug 2013 somesubsystem: this is my message for username9001 id:9001", " ", 9001); assertEquals(0, result[0]); assertEquals(0, result[1]); }
/**
 * Writes a key/value pair bound to the client's global lease, so the entry expires with the
 * lease (ephemeral semantics). Failures are logged and swallowed (best-effort write), but an
 * interrupt now restores the thread's interrupt flag so callers can still observe it.
 */
public void putEphemeral(final String key, final String value) {
    try {
        KV kvClient = client.getKVClient();
        kvClient.put(ByteSequence.from(key, UTF_8), ByteSequence.from(value, UTF_8),
                PutOption.newBuilder().withLeaseId(globalLeaseId).build())
                .get(timeout, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status instead of silently discarding it, so code
        // further up the stack can react to the interruption.
        Thread.currentThread().interrupt();
        LOGGER.error("putEphemeral(key:{},value:{}) error.", key, value, e);
    } catch (ExecutionException | TimeoutException e) {
        LOGGER.error("putEphemeral(key:{},value:{}) error.", key, value, e);
    }
}
// Verifies putEphemeral issues a leased KV put, and that a failing future is caught rather than propagated.
@Test public void putEphemeralTest() { try (MockedStatic<Client> clientMockedStatic = mockStatic(Client.class)) { final Client client = this.mockEtcd(clientMockedStatic); final KV mockKV = mock(KV.class); when(client.getKVClient()).thenReturn(mockKV); final CompletableFuture<PutResponse> completableFuture = mock(CompletableFuture.class); when(mockKV.put(any(), any(), any())).thenReturn(completableFuture); final PutResponse putResponse = mock(PutResponse.class); when(completableFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(putResponse); final EtcdClient etcdClient = new EtcdClient("url", 60L, 3000L); etcdClient.putEphemeral("key", "value"); doThrow(new InterruptedException("error")).when(completableFuture).get(anyLong(), any(TimeUnit.class)); etcdClient.putEphemeral("key", "value"); } catch (Exception e) { throw new ShenyuException(e.getCause()); } }
/**
 * Recomputes partition-group metadata for a Kinesis stream: keeps groups still being consumed
 * (carrying forward the appropriate start offset), drops shards that expired or whose end was
 * fully consumed, and adds brand-new shards whose parent is absent, expired, or completely
 * consumed (so children are only picked up after their parents are done).
 */
@Override public List<PartitionGroupMetadata> computePartitionGroupMetadata(String clientId, StreamConfig streamConfig, List<PartitionGroupConsumptionStatus> partitionGroupConsumptionStatuses, int timeoutMillis) throws IOException, TimeoutException { List<PartitionGroupMetadata> newPartitionGroupMetadataList = new ArrayList<>(); Map<String, Shard> shardIdToShardMap = _kinesisConnectionHandler.getShards().stream() .collect(Collectors.toMap(Shard::shardId, s -> s, (s1, s2) -> s1)); Set<String> shardsInCurrent = new HashSet<>(); Set<String> shardsEnded = new HashSet<>(); // TODO: Once we start supporting multiple shards in a PartitionGroup, // we need to iterate over all shards to check if any of them have reached end // Process existing shards. Add them to new list if still consuming from them for (PartitionGroupConsumptionStatus currentPartitionGroupConsumptionStatus : partitionGroupConsumptionStatuses) { KinesisPartitionGroupOffset kinesisStartCheckpoint = (KinesisPartitionGroupOffset) currentPartitionGroupConsumptionStatus.getStartOffset(); String shardId = kinesisStartCheckpoint.getShardId(); shardsInCurrent.add(shardId); Shard shard = shardIdToShardMap.get(shardId); if (shard == null) { // Shard has expired shardsEnded.add(shardId); String lastConsumedSequenceID = kinesisStartCheckpoint.getSequenceNumber(); LOGGER.warn( "Kinesis shard with id: {} has expired. Data has been consumed from the shard till sequence number: {}. 
" + "There can be potential data loss.", shardId, lastConsumedSequenceID); continue; } StreamPartitionMsgOffset newStartOffset; StreamPartitionMsgOffset currentEndOffset = currentPartitionGroupConsumptionStatus.getEndOffset(); if (currentEndOffset != null) { // Segment DONE (committing/committed) String endingSequenceNumber = shard.sequenceNumberRange().endingSequenceNumber(); if (endingSequenceNumber != null) { // Shard has ended, check if we're also done consuming it if (consumedEndOfShard(currentEndOffset, currentPartitionGroupConsumptionStatus)) { shardsEnded.add(shardId); continue; // Shard ended and we're done consuming it. Skip } } newStartOffset = currentEndOffset; } else { // Segment IN_PROGRESS newStartOffset = currentPartitionGroupConsumptionStatus.getStartOffset(); } newPartitionGroupMetadataList.add( new PartitionGroupMetadata(currentPartitionGroupConsumptionStatus.getPartitionGroupId(), newStartOffset)); } // Add brand new shards for (Map.Entry<String, Shard> entry : shardIdToShardMap.entrySet()) { // If shard was already in current list, skip String newShardId = entry.getKey(); if (shardsInCurrent.contains(newShardId)) { continue; } Shard newShard = entry.getValue(); String parentShardId = newShard.parentShardId(); // Add the new shard in the following 3 cases: // 1. Root shards - Parent shardId will be null. Will find this case when creating new table. // 2. Parent expired - Parent shardId will not be part of shardIdToShard map // 3. Parent reached EOL and completely consumed. if (parentShardId == null || !shardIdToShardMap.containsKey(parentShardId) || shardsEnded.contains( parentShardId)) { // TODO: Revisit this. Kinesis starts consuming AFTER the start sequence number, and we might miss the first // message. 
StreamPartitionMsgOffset newStartOffset = new KinesisPartitionGroupOffset(newShardId, newShard.sequenceNumberRange().startingSequenceNumber()); int partitionGroupId = getPartitionGroupIdFromShardId(newShardId); newPartitionGroupMetadataList.add(new PartitionGroupMetadata(partitionGroupId, newStartOffset)); } } return newPartitionGroupMetadataList; }
// Verifies that with no prior consumption status, each Kinesis shard maps to its own partition group.
@Test public void getPartitionsGroupInfoListTest() throws Exception { Shard shard0 = Shard.builder().shardId(SHARD_ID_0) .sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build(); Shard shard1 = Shard.builder().shardId(SHARD_ID_1) .sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build(); when(_kinesisConnectionHandler.getShards()).thenReturn(ImmutableList.of(shard0, shard1)); List<PartitionGroupMetadata> result = _kinesisStreamMetadataProvider.computePartitionGroupMetadata(CLIENT_ID, getStreamConfig(), new ArrayList<>(), TIMEOUT); Assert.assertEquals(result.size(), 2); Assert.assertEquals(result.get(0).getPartitionGroupId(), 0); Assert.assertEquals(result.get(1).getPartitionGroupId(), 1); }
/**
 * Creates a Jt400 endpoint: resolves the connection pool from a URI reference when the
 * "connectionPool" option is present (removing the option, since it is handled here) or falls
 * back to the component's default pool, then derives the object type from the path suffix.
 */
@Override protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> properties) throws Exception { AS400ConnectionPool connectionPool; if (properties.containsKey(CONNECTION_POOL)) { LOG.trace("AS400ConnectionPool instance specified in the URI - will look it up."); // We have chosen to handle the connectionPool option ourselves, so // we must remove it from the given parameter list (see // http://camel.apache.org/writing-components.html) String poolId = properties.remove(CONNECTION_POOL).toString(); connectionPool = EndpointHelper.resolveReferenceParameter(getCamelContext(), poolId, AS400ConnectionPool.class, true); } else { LOG.trace("No AS400ConnectionPool instance specified in the URI - one will be provided."); connectionPool = getConnectionPool(); } String type = remaining.substring(remaining.lastIndexOf('.') + 1).toUpperCase(); Jt400Endpoint endpoint = new Jt400Endpoint(uri, this, connectionPool); setProperties(endpoint, properties); endpoint.setType(Jt400Type.valueOf(type)); return endpoint; }
// Verifies endpoint creation fails for an unsupported IBM i object type suffix (".xxx").
@Test public void testCreateEndpointForOtherObjectType() { try { component.createEndpoint("jt400://user:password@host/qsys.lib/library.lib/program.xxx"); fail("Exception should been thrown when trying to create an endpoint for an unsupported object type"); } catch (Exception e) { // this is just what we expected } }
/**
 * For public-key authentication, narrows the agent identities to the one whose UTF-8 comment
 * equals the selected identity file's absolute path; otherwise (or when no comment matches)
 * returns the identities unchanged.
 */
protected Collection<Identity> filter(final Credentials credentials, final Collection<Identity> identities) { if(credentials.isPublicKeyAuthentication()) { final Local selected = credentials.getIdentity(); for(Identity identity : identities) { if(identity.getComment() != null) { final String candidate = new String(identity.getComment(), StandardCharsets.UTF_8); if(selected.getAbsolute().equals(candidate)) { if(log.isDebugEnabled()) { log.debug(String.format("Matching identity %s found", candidate)); } return Collections.singletonList(identity); } } } } return identities; }
// Verifies that without public-key credentials the identity list is returned unfiltered.
@Test public void filterIdentitiesNoKeySet() { final SFTPAgentAuthentication authentication = new SFTPAgentAuthentication(new SSHClient(), new OpenSSHAgentAuthenticator(new AgentProxy(null))); final Credentials credentials = new Credentials("user"); final List<Identity> identities = new ArrayList<>(); final Identity nomatch = mock(Identity.class); when(nomatch.getComment()).thenReturn(StringUtils.getBytes("comment1", StandardCharsets.UTF_8)); identities.add(nomatch); identities.add(nomatch); final Collection<Identity> filtered = authentication.filter(credentials, identities); assertEquals(2, filtered.size()); }
/** Formats an amount as Chinese numerals (simplified or traditional), delegating with money-mode disabled. */
public static String format(double amount, boolean isUseTraditional) { return format(amount, isUseTraditional, false); }
// Verifies trillion-scale longs format correctly, including a zero-gap segment ("零") between magnitudes.
@Test public void formatTrillionsLongTest() { String f = NumberChineseFormatter.format(1_0000_0000_0000L, false); assertEquals("一万亿", f); f = NumberChineseFormatter.format(1_0000_1000_0000L, false); assertEquals("一万亿零一千万", f); f = NumberChineseFormatter.format(1_0010_0000_0000L, false); assertEquals("一万零一十亿", f); }
/**
 * Resolves the per-key error from a sub-level error map: an absent key yields an
 * IllegalArgumentException with the supplied message; a present key yields the mapped
 * error's exception (which may be null for Errors.NONE).
 */
static <K> Throwable getSubLevelError(Map<K, Errors> subLevelErrors, K subKey, String keyNotFoundMsg) {
    return subLevelErrors.containsKey(subKey)
        ? subLevelErrors.get(subKey).exception()
        : new IllegalArgumentException(keyNotFoundMsg);
}
// Verifies getSubLevelError returns IllegalArgumentException for a missing key, null for Errors.NONE, and the mapped exception type otherwise.
@Test public void testGetSubLevelError() { List<MemberIdentity> memberIdentities = asList( new MemberIdentity().setGroupInstanceId("instance-0"), new MemberIdentity().setGroupInstanceId("instance-1")); Map<MemberIdentity, Errors> errorsMap = new HashMap<>(); errorsMap.put(memberIdentities.get(0), Errors.NONE); errorsMap.put(memberIdentities.get(1), Errors.FENCED_INSTANCE_ID); assertEquals(IllegalArgumentException.class, KafkaAdminClient.getSubLevelError(errorsMap, new MemberIdentity().setGroupInstanceId("non-exist-id"), "For unit test").getClass()); assertNull(KafkaAdminClient.getSubLevelError(errorsMap, memberIdentities.get(0), "For unit test")); assertEquals(FencedInstanceIdException.class, KafkaAdminClient.getSubLevelError( errorsMap, memberIdentities.get(1), "For unit test").getClass()); }
/** Converts the external issue to its scanner-report form and appends it to the report keyed by the issue component's scanner id. */
public void initAndAddExternalIssue(ExternalIssue issue) { DefaultInputComponent inputComponent = (DefaultInputComponent) issue.primaryLocation().inputComponent(); ScannerReport.ExternalIssue rawExternalIssue = createReportExternalIssue(issue, inputComponent.scannerId()); write(inputComponent.scannerId(), rawExternalIssue); }
// Verifies an external issue is written to the report under the file's scanner id with its severity preserved.
@Test public void add_external_issue_to_cache() { initModuleIssues(); DefaultExternalIssue issue = new DefaultExternalIssue(project) .at(new DefaultIssueLocation().on(file).at(file.selectLine(3)).message("Foo")) .type(RuleType.BUG) .forRule(JAVA_RULE_KEY) .severity(org.sonar.api.batch.rule.Severity.CRITICAL); moduleIssues.initAndAddExternalIssue(issue); ArgumentCaptor<ScannerReport.ExternalIssue> argument = ArgumentCaptor.forClass(ScannerReport.ExternalIssue.class); verify(reportPublisher.getWriter()).appendComponentExternalIssue(eq(file.scannerId()), argument.capture()); assertThat(argument.getValue().getSeverity()).isEqualTo(org.sonar.scanner.protocol.Constants.Severity.CRITICAL); }
/**
 * Parses a logical expression over fully-qualified {@code Predicate} class
 * names combined with the operator characters in {@code OPERATORS} (including
 * parentheses), using a shunting-yard style evaluation over an operand stack
 * and an operator stack. Each class name is instantiated reflectively via its
 * no-arg constructor.
 */
public static Predicate parse(String expression) {
    final Stack<Predicate> predicateStack = new Stack<>();
    final Stack<Character> operatorStack = new Stack<>();
    // Strip whitespace, then tokenize; operator characters are returned as tokens too.
    final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
    final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
    // isTokenMode == false means "re-examine the operator just popped from the
    // stack" (after a precedence-driven reduction) instead of consuming input.
    boolean isTokenMode = true;
    while (true) {
        final Character operator;
        final String token;
        if (isTokenMode) {
            if (tokenizer.hasMoreTokens()) {
                token = tokenizer.nextToken();
            } else {
                break;
            }
            if (OPERATORS.contains(token)) {
                operator = token.charAt(0);
            } else {
                operator = null;
            }
        } else {
            operator = operatorStack.pop();
            token = null;
        }
        isTokenMode = true;
        if (operator == null) {
            // Operand token: instantiate the named Predicate implementation.
            try {
                predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
            } catch (ClassCastException e) {
                throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        } else {
            if (operatorStack.empty() || operator == '(') {
                operatorStack.push(operator);
            } else if (operator == ')') {
                // Reduce until the matching '(' is found, then discard it.
                while (operatorStack.peek() != '(') {
                    evaluate(predicateStack, operatorStack);
                }
                operatorStack.pop();
            } else {
                // Lower-precedence operator: reduce the stack top first, then
                // revisit this operator on the next iteration (isTokenMode=false).
                if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) {
                    evaluate(predicateStack, operatorStack);
                    isTokenMode = false;
                }
                operatorStack.push(operator);
            }
        }
    }
    // Input exhausted: reduce any remaining operators.
    while (!operatorStack.empty()) {
        evaluate(predicateStack, operatorStack);
    }
    // A well-formed expression reduces to exactly one predicate.
    if (predicateStack.size() > 1) {
        throw new RuntimeException("Invalid logical expression");
    }
    return predicateStack.pop();
}
@Test
public void testOr() {
    // "A | B" must parse into an OrPredicate whose children appear in source order.
    final Predicate parsed = PredicateExpressionParser.parse("com.linkedin.data.it.AlwaysTruePredicate | com.linkedin.data.it.AlwaysFalsePredicate");
    Assert.assertEquals(parsed.getClass(), OrPredicate.class);
    final List<Predicate> children = ((OrPredicate) parsed).getChildPredicates();
    Assert.assertEquals(children.get(0).getClass(), AlwaysTruePredicate.class);
    Assert.assertEquals(children.get(1).getClass(), AlwaysFalsePredicate.class);
}
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } boolean result = false; boolean containsNull = false; // Spec. definition: return true if any item is true, else false if all items are false, else null for ( final Object element : list ) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean")); } else { if (element != null) { result |= (Boolean) element; } else if (!containsNull) { containsNull = true; } } } if (containsNull && !result) { return FEELFnResult.ofResult( null ); } else { return FEELFnResult.ofResult( result ); } }
@Test
void invokeListParamReturnTrue() {
    // any() is true as soon as one element is true, even alongside false or null.
    FunctionTestUtil.assertResult(anyFunction.invoke(Arrays.asList(Boolean.TRUE, Boolean.TRUE)), true);
    FunctionTestUtil.assertResult(anyFunction.invoke(Arrays.asList(Boolean.TRUE, Boolean.FALSE)), true);
    FunctionTestUtil.assertResult(anyFunction.invoke(Arrays.asList(Boolean.TRUE, null)), true);
    FunctionTestUtil.assertResult(anyFunction.invoke(Arrays.asList(Boolean.TRUE, null, Boolean.FALSE)), true);
}
/**
 * Accumulates {@code incValue} bytes into the QUEUE_GET_SIZE statistic for the
 * given group/topic/queue, but only when per-queue statistics are enabled.
 */
public void incQueueGetSize(final String group, final String topic, final Integer queueId, final int incValue) {
    if (enableQueueStat) {
        final String statsKey = buildStatsKey(topic, queueId, group);
        // Count of 1 per call; the value carries the byte size.
        this.statsTable.get(Stats.QUEUE_GET_SIZE).addValue(statsKey, incValue, 1);
    }
}
@Test
public void testIncQueueGetSize() {
    // One increment of 1 must be observable under the composed topic@queue@group key.
    brokerStatsManager.incQueueGetSize(GROUP_NAME, TOPIC, QUEUE_ID, 1);
    final String statsKey = brokerStatsManager.buildStatsKey(brokerStatsManager.buildStatsKey(TOPIC, String.valueOf(QUEUE_ID)), GROUP_NAME);
    assertThat(brokerStatsManager.getStatsItem(QUEUE_GET_SIZE, statsKey).getValue().doubleValue()).isEqualTo(1L);
}
@VisibleForTesting public String getLullMessage(Thread trackedThread, Duration millis) { // TODO(ajamato): Share getLullMessage code with DataflowExecutionState. String userStepName = this.labelsMetadata.getOrDefault(MonitoringInfoConstants.Labels.PTRANSFORM, null); StringBuilder message = new StringBuilder(); message.append("Operation ongoing"); if (userStepName != null) { message.append(" in step ").append(userStepName); } message .append(" for at least ") .append(formatDuration(millis)) .append(" without outputting or completing in state ") .append(getStateName()); message.append("\n"); StackTraceElement[] fullTrace = trackedThread.getStackTrace(); for (StackTraceElement e : fullTrace) { message.append(" at ").append(e).append("\n"); } return message.toString(); }
@Test
public void testGetLullReturnsARelevantMessageWithoutStepName() {
    // With no PTRANSFORM label, the message omits "in step ..." but still names the state.
    HashMap<String, String> labelsMetadata = new HashMap<String, String>();
    SimpleExecutionState testObject = new SimpleExecutionState("myState", null, labelsMetadata);
    String message = testObject.getLullMessage(new Thread(), Duration.millis(100_000));
    assertThat(message, containsString("myState"));
}
/**
 * Decodes as many SPDY frames as are fully readable from {@code buffer},
 * driving the decoder's state machine ({@code state}) and reporting each
 * decoded frame, setting, header block or error to {@code delegate}.
 * Returns (without consuming partial data for the pending unit) whenever
 * more input is required; the loop resumes in the saved state on re-entry.
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
        case READ_COMMON_HEADER:
            // Need the whole common header before anything can be dispatched.
            if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                return;
            }
            int frameOffset = buffer.readerIndex();
            int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
            int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
            buffer.skipBytes(SPDY_HEADER_SIZE);
            // High bit of the first byte distinguishes control vs data frames.
            boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
            int version;
            int type;
            if (control) {
                // Decode control frame common header
                version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                streamId = 0; // Default to session Stream-ID
            } else {
                // Decode data frame common header
                version = spdyVersion; // Default to expected version
                type = SPDY_DATA_FRAME;
                streamId = getUnsignedInt(buffer, frameOffset);
            }
            flags = buffer.getByte(flagsOffset);
            length = getUnsignedMedium(buffer, lengthOffset);
            // Check version first then validity
            if (version != spdyVersion) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SPDY Version");
            } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid Frame Error");
            } else {
                state = getNextState(type, length);
            }
            break;
        case READ_DATA_FRAME:
            if (length == 0) {
                // Empty data frame: still delivered (may carry the FIN flag).
                state = State.READ_COMMON_HEADER;
                delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                break;
            }
            // Generate data frames that do not exceed maxChunkSize
            int dataLength = Math.min(maxChunkSize, length);
            // Wait until entire frame is readable
            if (buffer.readableBytes() < dataLength) {
                return;
            }
            ByteBuf data = buffer.alloc().buffer(dataLength);
            data.writeBytes(buffer, dataLength);
            length -= dataLength;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
            }
            // FIN is only reported on the final chunk of the frame.
            last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
            delegate.readDataFrame(streamId, last, data);
            break;
        case READ_SYN_STREAM_FRAME:
            // Fixed 10-byte SYN_STREAM body: stream id, associated id, priority.
            if (buffer.readableBytes() < 10) {
                return;
            }
            int offset = buffer.readerIndex();
            streamId = getUnsignedInt(buffer, offset);
            int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
            // Priority is the top 3 bits of the ninth byte.
            byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
            last = hasFlag(flags, SPDY_FLAG_FIN);
            boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
            buffer.skipBytes(10);
            length -= 10;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_STREAM Frame");
            } else {
                // Remaining bytes are the compressed header block.
                state = State.READ_HEADER_BLOCK;
                delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
            }
            break;
        case READ_SYN_REPLY_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_REPLY Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynReplyFrame(streamId, last);
            }
            break;
        case READ_RST_STREAM_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            // Both stream id and status code must be non-zero per the spec.
            if (streamId == 0 || statusCode == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid RST_STREAM Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readRstStreamFrame(streamId, statusCode);
            }
            break;
        case READ_SETTINGS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
            numSettings = getUnsignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            length -= 4;
            // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
            if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SETTINGS Frame");
            } else {
                state = State.READ_SETTING;
                delegate.readSettingsFrame(clear);
            }
            break;
        case READ_SETTING:
            if (numSettings == 0) {
                // All entries consumed: close out the settings frame.
                state = State.READ_COMMON_HEADER;
                delegate.readSettingsEnd();
                break;
            }
            if (buffer.readableBytes() < 8) {
                return;
            }
            byte settingsFlags = buffer.getByte(buffer.readerIndex());
            int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
            int value = getSignedInt(buffer, buffer.readerIndex() + 4);
            boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
            boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
            buffer.skipBytes(8);
            --numSettings;
            delegate.readSetting(id, value, persistValue, persisted);
            break;
        case READ_PING_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            int pingId = getSignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            state = State.READ_COMMON_HEADER;
            delegate.readPingFrame(pingId);
            break;
        case READ_GOAWAY_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            state = State.READ_COMMON_HEADER;
            delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
            break;
        case READ_HEADERS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid HEADERS Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readHeadersFrame(streamId, last);
            }
            break;
        case READ_WINDOW_UPDATE_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            // A zero window delta is illegal.
            if (deltaWindowSize == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
            }
            break;
        case READ_HEADER_BLOCK:
            if (length == 0) {
                // Header block fully consumed for the current frame.
                state = State.READ_COMMON_HEADER;
                delegate.readHeaderBlockEnd();
                break;
            }
            if (!buffer.isReadable()) {
                return;
            }
            // Deliver whatever compressed header bytes are currently available.
            int compressedBytes = Math.min(buffer.readableBytes(), length);
            ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
            headerBlock.writeBytes(buffer, compressedBytes);
            length -= compressedBytes;
            delegate.readHeaderBlock(headerBlock);
            break;
        case DISCARD_FRAME:
            // Skip the remainder of an unwanted frame, possibly across calls.
            int numBytes = Math.min(buffer.readableBytes(), length);
            buffer.skipBytes(numBytes);
            length -= numBytes;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                break;
            }
            return;
        case FRAME_ERROR:
            // Terminal state: drain all remaining input.
            buffer.skipBytes(buffer.readableBytes());
            return;
        default:
            throw new Error("Shouldn't reach here.");
        }
    }
}
@Test
public void testUnknownSpdySynReplyFrameFlags() throws Exception {
    // Undefined flag bits on a SYN_REPLY frame must be ignored, not rejected.
    short type = 2;
    byte flags = (byte) 0xFE; // undefined flags
    int length = 4;
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId);
    decoder.decode(buf);
    // Frame decodes normally: FIN not set, empty header block terminated.
    verify(delegate).readSynReplyFrame(streamId, false);
    verify(delegate).readHeaderBlockEnd();
    assertFalse(buf.isReadable());
    buf.release();
}
/**
 * Marks the repetitive-schedule meter, then delegates to the wrapped executor
 * with the task wrapped so each periodic run is also instrumented.
 */
@Nonnull
@Override
public ScheduledFuture<?> scheduleAtFixedRate(@Nonnull Runnable command, long initialDelay, long period, @Nonnull TimeUnit unit) {
    scheduledRepetitively.mark();
    return delegate.scheduleAtFixedRate(new InstrumentedPeriodicRunnable(command, period, unit), initialDelay, period, unit);
}
@Test
public void testScheduleFixedRateCallable() throws Exception {
    // All meters start at zero before anything is scheduled.
    assertThat(submitted.getCount()).isZero();
    assertThat(running.getCount()).isZero();
    assertThat(completed.getCount()).isZero();
    assertThat(duration.getCount()).isZero();
    assertThat(scheduledOnce.getCount()).isZero();
    assertThat(scheduledRepetitively.getCount()).isZero();
    assertThat(scheduledOverrun.getCount()).isZero();
    assertThat(percentOfPeriod.getCount()).isZero();
    // Task deliberately sleeps longer than its 10ms period to trigger overruns.
    ScheduledFuture<?> theFuture = instrumentedScheduledExecutor.scheduleAtFixedRate(() -> {
        assertThat(submitted.getCount()).isZero();
        assertThat(running.getCount()).isEqualTo(1);
        assertThat(scheduledOnce.getCount()).isEqualTo(0);
        assertThat(scheduledRepetitively.getCount()).isEqualTo(1);
        try {
            TimeUnit.MILLISECONDS.sleep(50);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
    }, 10L, 10L, TimeUnit.MILLISECONDS);
    TimeUnit.MILLISECONDS.sleep(100);
    theFuture.cancel(true);
    TimeUnit.MILLISECONDS.sleep(100);
    // After cancellation: runs completed, durations recorded, overruns counted,
    // and the schedule was registered exactly once as repetitive.
    assertThat(submitted.getCount()).isZero();
    assertThat(running.getCount()).isZero();
    assertThat(completed.getCount()).isNotEqualTo(0);
    assertThat(duration.getCount()).isNotEqualTo(0);
    assertThat(duration.getSnapshot().size()).isNotEqualTo(0);
    assertThat(scheduledOnce.getCount()).isZero();
    assertThat(scheduledRepetitively.getCount()).isEqualTo(1);
    assertThat(scheduledOverrun.getCount()).isNotEqualTo(0);
    assertThat(percentOfPeriod.getCount()).isNotEqualTo(0);
}
/**
 * Restarts a workflow instance (recursively down the restart chain) and
 * converts a NON_TERMINAL_ERROR outcome into a MaestroBadRequestException,
 * since only terminal instances may be restarted.
 */
public RunResponse restart(RunRequest runRequest) {
    RunResponse runResponse = restartRecursively(runRequest);
    if (runResponse.getStatus() == RunResponse.Status.NON_TERMINAL_ERROR) {
        LOG.error(
            "workflow instance {} does not support restart action as it is in a non-terminal status [{}]",
            runRequest.getWorkflowIdentity(),
            runResponse.getTimelineEvent().getMessage());
        throw new MaestroBadRequestException(
            Collections.emptyList(),
            "workflow instance %s does not support restart action as it is in a non-terminal status [%s]",
            runRequest.getWorkflowIdentity(),
            runResponse.getTimelineEvent().getMessage());
    }
    return runResponse;
}
@Test
public void testInvalidRestart() {
    // An IN_PROGRESS (non-terminal) instance must reject restart requests;
    // a restart-node mismatch must be rejected even when the instance is terminal.
    when(runStrategyDao.startWithRunStrategy(any(), any())).thenReturn(1);
    WorkflowInstance wfInstance = new WorkflowInstance();
    wfInstance.setInitiator(new ManualInitiator());
    wfInstance.setStatus(WorkflowInstance.Status.IN_PROGRESS);
    wfInstance.setWorkflowInstanceId(10L);
    wfInstance.setWorkflowRunId(1L);
    wfInstance.setWorkflowId("test-workflow");
    RunRequest request =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.RESTART_FROM_BEGINNING)
            .restartConfig(
                RestartConfig.builder().addRestartNode("test-workflow", 1L, null).build())
            .build();
    when(instanceDao.getWorkflowInstance("test-workflow", 1L, Constants.LATEST_INSTANCE_RUN, true))
        .thenReturn(wfInstance);
    // Non-terminal + RESTART_FROM_BEGINNING -> bad request.
    AssertHelper.assertThrows(
        "workflow instance does not support restart action",
        MaestroBadRequestException.class,
        "workflow instance [test-workflow][1] does not support restart action as it is in a non-terminal status [IN_PROGRESS]",
        () -> actionHandler.restart(request));
    wfInstance.setInitiator(new ManualInitiator());
    RunRequest request2 =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
            .restartConfig(
                RestartConfig.builder().addRestartNode("test-workflow", 1L, null).build())
            .build();
    // Non-terminal + START_FRESH_NEW_RUN -> same bad request.
    AssertHelper.assertThrows(
        "Cannot restart a workflow instance for new instance",
        MaestroBadRequestException.class,
        "workflow instance [test-workflow][1] does not support restart action as it is in a non-terminal status [IN_PROGRESS]",
        () -> actionHandler.restart(request2));
    RunRequest request3 =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.RESTART_FROM_BEGINNING)
            .stepRunParams(Collections.singletonMap("job1", Collections.emptyMap()))
            .restartConfig(
                RestartConfig.builder().addRestartNode("test-workflow", 1L, null).build())
            .build();
    // Terminal (FAILED) but restart node points at instance 1 while the
    // loaded instance is 10 -> identity mismatch.
    wfInstance.setStatus(WorkflowInstance.Status.FAILED);
    AssertHelper.assertThrows(
        "Cannot restart a workflow instance for new instance",
        IllegalArgumentException.class,
        "Cannot restart a workflow instance [test-workflow][10][1] as it does not match run request [test-workflow][1]",
        () -> actionHandler.restart(request3));
}
/**
 * Returns the value for {@code key}, computing it with {@code valueFunction}
 * when absent. Computed values are cached only while the cache is below
 * capacity; once full, values for new keys are recomputed on every call and
 * never stored ("initial set" semantics).
 */
public V computeIfAbsent(K key, Function<? super K, ? extends V> valueFunction) {
    V value = cache.get(key);
    if (value == null) {
        if (cache.size() < capacity) {
            // use CHM.computeIfAbsent to avoid duplicate calculation of a single key
            value = cache.computeIfAbsent(key, valueFunction);
        } else {
            // NOTE(review): the size check and insert are not atomic, so
            // concurrent callers may briefly overshoot capacity — presumably
            // acceptable for this best-effort cache; confirm if a hard cap is needed.
            value = valueFunction.apply(key);
        }
    }
    return value;
}
@Test
public void when_addingElementsToCacheInSingleThread_then_properSizeAndElements() {
    // Adding more elements than capacity must retain exactly the first `capacity` keys.
    int capacity = 20;
    int elementsToAdd = 100;
    ConcurrentInitialSetCache<Integer, Integer> cache = new ConcurrentInitialSetCache<>(capacity);
    for (int i = 0; i < elementsToAdd; i++) {
        cache.computeIfAbsent(i, Function.identity());
    }
    assertEquals(capacity, cache.cache.size());
    for (int i = 0; i < capacity; i++) {
        assertTrue(cache.cache.containsKey(i));
    }
}
/**
 * Opens a short-lived Kafka Admin client against the cluster's internal
 * replication bootstrap service, delegates the metadata-version check/update,
 * and always closes the client before completing the returned future with the
 * delegate's original result.
 */
public static Future<Void> maybeUpdateMetadataVersion(
        Reconciliation reconciliation,
        Vertx vertx,
        TlsPemIdentity coTlsPemIdentity,
        AdminClientProvider adminClientProvider,
        String desiredMetadataVersion,
        KafkaStatus status
)   {
    // Internal bootstrap address on the replication port of the headless bootstrap service.
    String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT;
    LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace());
    Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());

    Promise<Void> updatePromise = Promise.promise();

    maybeUpdateMetadataVersion(reconciliation, vertx, kafkaAdmin, desiredMetadataVersion, status)
            .onComplete(res -> {
                // Close the Admin client and return the original result
                LOGGER.debugCr(reconciliation, "Closing the Kafka Admin API connection");
                kafkaAdmin.close();
                updatePromise.handle(res);
            });

    return updatePromise.future();
}
@Test
public void testSuccessfulMetadataVersionDowngrade(VertxTestContext context)   {
    // Downgrading to "3.5" must issue a SAFE_DOWNGRADE feature update and
    // record the resulting metadata version in the Kafka status.
    // Mock the Admin client
    Admin mockAdminClient = mock(Admin.class);
    // Mock describing the current metadata version
    mockDescribeVersion(mockAdminClient);
    // Mock updating metadata version
    UpdateFeaturesResult ufr = mock(UpdateFeaturesResult.class);
    when(ufr.values()).thenReturn(Map.of(KRaftMetadataManager.METADATA_VERSION_KEY, KafkaFuture.completedFuture(null)));
    @SuppressWarnings(value = "unchecked")
    ArgumentCaptor<Map<String, FeatureUpdate>> updateCaptor = ArgumentCaptor.forClass(Map.class);
    when(mockAdminClient.updateFeatures(updateCaptor.capture(), any())).thenReturn(ufr);
    // Mock the Admin client provider
    AdminClientProvider mockAdminClientProvider = mockAdminClientProvider(mockAdminClient);
    // Dummy KafkaStatus to check the values from
    KafkaStatus status = new KafkaStatus();
    Checkpoint checkpoint = context.checkpoint();
    KRaftMetadataManager.maybeUpdateMetadataVersion(Reconciliation.DUMMY_RECONCILIATION, vertx, DUMMY_IDENTITY, mockAdminClientProvider, "3.5", status)
            .onComplete(context.succeeding(s -> {
                // Status reflects the resolved metadata version.
                assertThat(status.getKafkaMetadataVersion(), is("3.5-IV2"));
                verify(mockAdminClient, times(1)).updateFeatures(any(), any());
                verify(mockAdminClient, times(1)).describeFeatures();
                // Exactly one update, flagged as a safe downgrade to level 11.
                assertThat(updateCaptor.getAllValues().size(), is(1));
                assertThat(updateCaptor.getValue().get(KRaftMetadataManager.METADATA_VERSION_KEY).upgradeType(), is(FeatureUpdate.UpgradeType.SAFE_DOWNGRADE));
                assertThat(updateCaptor.getValue().get(KRaftMetadataManager.METADATA_VERSION_KEY).maxVersionLevel(), is((short) 11));
                checkpoint.flag();
            }));
}
/**
 * Loads a state-machine instance by id, attaches all of its state instances
 * (keyed by state-instance id), and deserializes its stored parameters and
 * exception. Returns null when the id is unknown.
 */
@Override
public StateMachineInstance getStateMachineInstance(String stateMachineInstanceId) {
    StateMachineInstance stateMachineInstance = selectOne(stateLogStoreSqls.getGetStateMachineInstanceByIdSql(dbType),
            RESULT_SET_TO_STATE_MACHINE_INSTANCE, stateMachineInstanceId);
    if (stateMachineInstance == null) {
        return null;
    }
    // Hydrate the child state instances into the machine instance.
    List<StateInstance> stateInstanceList = queryStateInstanceListByMachineInstanceId(stateMachineInstanceId);
    for (StateInstance stateInstance : stateInstanceList) {
        stateMachineInstance.putStateInstance(stateInstance.getId(), stateInstance);
    }
    // Inflate serialized start params / exception back into objects.
    deserializeParamsAndException(stateMachineInstance);
    return stateMachineInstance;
}
@Test
public void testGetStateMachineInstance() {
    // Smoke test: the lookup must not throw for an unknown id.
    // NOTE(review): despite the test name, this exercises getStateInstance,
    // not getStateMachineInstance — confirm whether that is intended.
    Assertions.assertDoesNotThrow(() -> dbAndReportTcStateLogStore.getStateInstance("test", "test"));
}
/**
 * Executes ZDIFF over the given sorted sets: "ZDIFF numkeys key [key ...]".
 * The first key is used for slot routing of the command.
 * NOTE(review): assumes at least one set is supplied — {@code sets[0]} would
 * throw ArrayIndexOutOfBoundsException on an empty varargs call.
 */
@Override
public Set<byte[]> zDiff(byte[]... sets) {
    List<Object> args = new ArrayList<>(sets.length + 1);
    // Redis requires the key count before the keys themselves.
    args.add(sets.length);
    args.addAll(Arrays.asList(sets));
    return write(sets[0], ByteArrayCodec.INSTANCE, ZDIFF, args.toArray());
}
@Test
public void testZDiff() {
    // test \ test2 must leave exactly the members unique to "test": 1 and 4.
    StringRedisTemplate redisTemplate = new StringRedisTemplate();
    redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
    redisTemplate.afterPropertiesSet();
    redisTemplate.boundZSetOps("test").add("1", 10);
    redisTemplate.boundZSetOps("test").add("2", 20);
    redisTemplate.boundZSetOps("test").add("3", 30);
    redisTemplate.boundZSetOps("test").add("4", 30);
    redisTemplate.boundZSetOps("test2").add("5", 50);
    redisTemplate.boundZSetOps("test2").add("2", 20);
    redisTemplate.boundZSetOps("test2").add("3", 30);
    redisTemplate.boundZSetOps("test2").add("6", 60);
    Set<String> objs = redisTemplate.boundZSetOps("test").difference("test2");
    assertThat(objs).hasSize(2);
}
/**
 * Parses a string into the narrowest Connect schema-and-value it can infer.
 * Null maps to the shared null constant; an empty string stays a plain string.
 */
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    // Non-trivial input: run the full value parser (not embedded mode).
    ValueParser parser = new ValueParser(new Parser(value));
    return parser.parse(false);
}
@Test
public void shouldParseBooleanLiteralsEmbeddedInMap() {
    // Boolean literals inside a map literal must infer MAP<BOOLEAN, BOOLEAN>.
    SchemaAndValue schemaAndValue = Values.parseString("{true: false, false: true}");
    assertEquals(Type.MAP, schemaAndValue.schema().type());
    assertEquals(Type.BOOLEAN, schemaAndValue.schema().keySchema().type());
    assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type());
    Map<Boolean, Boolean> expectedValue = new HashMap<>();
    expectedValue.put(true, false);
    expectedValue.put(false, true);
    assertEquals(expectedValue, schemaAndValue.value());
}
/**
 * Reconstructs the full request URL, re-attaching the query string when the
 * request carries one.
 */
protected String getRequestURL(HttpServletRequest request) {
    final StringBuffer url = request.getRequestURL();
    final String query = request.getQueryString();
    if (query != null) {
        url.append("?").append(query);
    }
    return url.toString();
}
@Test
public void testGetRequestURL() throws Exception {
    // The filter must rebuild "URL?query" from the mocked request parts.
    AuthenticationFilter filter = new AuthenticationFilter();
    try {
        FilterConfig config = Mockito.mock(FilterConfig.class);
        Mockito.when(config.getInitParameter("management.operation.return")).
            thenReturn("true");
        Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
            DummyAuthenticationHandler.class.getName());
        Mockito.when(config.getInitParameterNames()).thenReturn(
            new Vector<String>(
                Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                    "management.operation.return")).elements());
        getMockedServletContextWithStringSigner(config);
        filter.init(config);
        HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
        Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
        Mockito.when(request.getQueryString()).thenReturn("a=A&b=B");
        Assert.assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request));
    } finally {
        filter.destroy();
    }
}
/**
 * Decorates a supplier with result-based recovery: when the produced result
 * matches {@code resultPredicate}, it is replaced by the value returned from
 * {@code resultHandler}; otherwise the result passes through unchanged.
 */
public static <T> Supplier<T> recover(Supplier<T> supplier, Predicate<T> resultPredicate, UnaryOperator<T> resultHandler) {
    return () -> {
        final T value = supplier.get();
        return resultPredicate.test(value) ? resultHandler.apply(value) : value;
    };
}
@Test(expected = RuntimeException.class)
public void shouldRethrowException2() {
    // Recovery is registered for IllegalArgumentException only, so the
    // RuntimeException thrown by the supplier must propagate unchanged.
    Supplier<String> supplier = () -> {
        throw new RuntimeException("BAM!");
    };
    Supplier<String> supplierWithRecovery = SupplierUtils.recover(supplier, IllegalArgumentException.class, (ex) -> "Bla");
    supplierWithRecovery.get();
}
/**
 * Returns the ids of all currently suspended applications, translating the
 * status service's internal instance references into public ApplicationIds.
 */
@Override
public Set<ApplicationId> getAllSuspendedApplications() {
    Set<ApplicationInstanceReference> refSet = statusService.getAllSuspendedApplications();
    return refSet.stream().map(OrchestratorUtil::toApplicationId).collect(toSet());
}
@Test
public void appliations_list_returns_empty_initially() {
    // No application has been suspended yet, so the set must be empty.
    assertTrue(orchestrator.getAllSuspendedApplications().isEmpty());
}
/**
 * SQL scalar function: inverse Weibull CDF (quantile) for shape {@code a},
 * scale {@code b}, at probability {@code p}.
 */
@Description("Inverse of Weibull cdf given a, b parameters and probability")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double inverseWeibullCdf(
        @SqlType(StandardTypes.DOUBLE) double a,
        @SqlType(StandardTypes.DOUBLE) double b,
        @SqlType(StandardTypes.DOUBLE) double p)
{
    // Validate arguments before delegating: p in [0, 1], a > 0, b > 0.
    checkCondition(p >= 0 && p <= 1, INVALID_FUNCTION_ARGUMENT, "inverseWeibullCdf Function: p must be in the interval [0, 1]");
    checkCondition(a > 0, INVALID_FUNCTION_ARGUMENT, "inverseWeibullCdf Function: a must be greater than 0");
    checkCondition(b > 0, INVALID_FUNCTION_ARGUMENT, "inverseWeibullCdf Function: b must be greater than 0");
    // Null RNG is acceptable here: only the deterministic inverse CDF is used.
    WeibullDistribution distribution = new WeibullDistribution(null, a, b, WeibullDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    return distribution.inverseCumulativeProbability(p);
}
@Test
public void testInverseWeibullCdf()
{
    // Known quantiles plus every argument-validation failure path.
    assertFunction("inverse_weibull_cdf(1.0, 1.0, 0.0)", DOUBLE, 0.0);
    assertFunction("round(inverse_weibull_cdf(1.0, 1.0, 0.632), 2)", DOUBLE, 1.00);
    assertFunction("round(inverse_weibull_cdf(1.0, 0.6, 0.91), 2)", DOUBLE, 1.44);
    assertInvalidFunction("inverse_weibull_cdf(0, 3, 0.5)", "inverseWeibullCdf Function: a must be greater than 0");
    assertInvalidFunction("inverse_weibull_cdf(3, 0, 0.5)", "inverseWeibullCdf Function: b must be greater than 0");
    assertInvalidFunction("inverse_weibull_cdf(3, 5, -0.1)", "inverseWeibullCdf Function: p must be in the interval [0, 1]");
    assertInvalidFunction("inverse_weibull_cdf(3, 5, 1.1)", "inverseWeibullCdf Function: p must be in the interval [0, 1]");
}
/**
 * Returns the instants a concurrent writer must be checked against for write
 * conflicts: commits completed since the last successful write, plus
 * compaction/clustering operations scheduled or completed after the current
 * instant (they may touch the same files being created now).
 */
@Override
public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant,
                                                  Option<HoodieInstant> lastSuccessfulInstant) {
    HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
    // To find which instants are conflicting, we apply the following logic
    // 1. Get completed instants timeline only for commits that have happened since the last successful write.
    // 2. Get any scheduled or completed compaction or clustering operations that have started and/or finished
    // after the current instant. We need to check for write conflicts since they may have mutated the same files
    // that are being newly created by the current write.
    Stream<HoodieInstant> completedCommitsInstantStream = activeTimeline
        .getCommitsTimeline()
        .filterCompletedInstants()
        // Fall back to the epoch marker when there is no prior successful write.
        .findInstantsAfter(lastSuccessfulInstant.isPresent() ? lastSuccessfulInstant.get().getTimestamp() : HoodieTimeline.INIT_INSTANT_TS)
        .getInstantsAsStream();
    Stream<HoodieInstant> compactionAndClusteringPendingTimeline = activeTimeline
        .filterPendingReplaceClusteringAndCompactionTimeline()
        .filter(instant -> ClusteringUtils.isClusteringInstant(activeTimeline, instant)
            || HoodieTimeline.COMPACTION_ACTION.equals(instant.getAction()))
        .findInstantsAfter(currentInstant.getTimestamp())
        .getInstantsAsStream();
    return Stream.concat(completedCommitsInstantStream, compactionAndClusteringPendingTimeline);
}
@Test
public void testConcurrentWriteAndCompactionScheduledEarlier() throws Exception {
    // A compaction scheduled BEFORE the writer's instant must not be
    // reported as a conflict candidate for that writer.
    createCommit(metaClient.createNewInstantTime(), metaClient);
    // compaction 1 gets scheduled
    String newInstantTime = metaClient.createNewInstantTime();
    createCompaction(newInstantTime, metaClient);
    // consider commits before this are all successful
    HoodieActiveTimeline timeline = metaClient.getActiveTimeline();
    Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant();
    // writer 1 starts
    String currentWriterInstant = metaClient.createNewInstantTime();
    createInflightCommit(currentWriterInstant, metaClient);
    Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant));
    SimpleConcurrentFileWritesConflictResolutionStrategy strategy = new SimpleConcurrentFileWritesConflictResolutionStrategy();
    HoodieCommitMetadata currentMetadata = createCommitMetadata(currentWriterInstant);
    metaClient.reloadActiveTimeline();
    List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect(
        Collectors.toList());
    // writer 1 should not conflict with an earlier scheduled compaction 1 with the same file ids
    Assertions.assertTrue(candidateInstants.size() == 0);
}
/**
 * Handles a @CookieValue-annotated argument: appends "name=arg" to the
 * request's Cookie header, creating the header entry when absent. The cookie
 * name must be non-blank or an IllegalStateException is raised.
 */
@Override
public boolean processArgument(final ShenyuRequest shenyuRequest, final Annotation annotation, final Object arg) {
    RequestTemplate requestTemplate = shenyuRequest.getRequestTemplate();
    CookieValue cookie = ANNOTATION.cast(annotation);
    String name = cookie.value().trim();
    checkState(emptyToNull(name) != null,
            "Cookie.name() was empty on parameter %s", requestTemplate.getMethod());
    // Start from any cookie pairs already present on the template.
    Collection<String> cookieExpression = requestTemplate.getHeaders().getOrDefault(HttpHeaders.COOKIE, Lists.newArrayList());
    cookieExpression.add(String.format("%s=%s", name, arg));
    Map<String, Collection<String>> headers = shenyuRequest.getHeaders();
    // Merge into the request's Cookie header, preserving existing values.
    headers.compute(HttpHeaders.COOKIE, (key, old) -> {
        if (CollectionUtils.isEmpty(old)) {
            return cookieExpression;
        }
        CollectionUtils.addAll(old, cookieExpression);
        return old;
    });
    shenyuRequest.setHeaders(headers);
    return true;
}
@Test
public void processArgumentNullTest() {
    // An empty cookie name must be rejected with IllegalStateException.
    final CookieValue cookie = spy(CookieValue.class);
    when(cookie.value()).thenReturn("");
    Assert.assertThrows(IllegalStateException.class, () -> processor.processArgument(request, cookie, ""));
}
public boolean start() { if (scheduler == null) { LOG.warn("scheduler is NULL."); return false; } // open server socket try { serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(NetUtils.getSockAddrBasedOnCurrIpVersion(port), 2048); serverChannel.configureBlocking(true); } catch (IOException e) { LOG.warn("Open MySQL network service failed.", e); return false; } // start accept thread listener = ThreadPoolManager.newDaemonCacheThreadPool(1, "MySQL-Protocol-Listener", true); running = true; listenerFuture = listener.submit(new Listener()); return true; }
@Test
public void testInvalidParam() throws IOException {
    // Reserve a free port, then release it so the server could bind it;
    // start() must still fail because the scheduler is null.
    ServerSocket socket = new ServerSocket(0);
    int port = socket.getLocalPort();
    socket.close();
    MysqlServer server = new MysqlServer(port, null, null);
    Assert.assertFalse(server.start());
}
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } boolean result = true; boolean containsNull = false; // Spec. definition: return false if any item is false, else true if all items are true, else null for ( final Object element : list ) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean")); } else { if (element != null) { result &= (Boolean) element; } else if (!containsNull) { containsNull = true; } } } if (containsNull && result) { return FEELFnResult.ofResult( null ); } else { return FEELFnResult.ofResult( result ); } }
@Test
void invokeListParamReturnFalse() {
    // all() is false as soon as one element is false, even alongside null.
    FunctionTestUtil.assertResult(allFunction.invoke(Arrays.asList(Boolean.TRUE, Boolean.FALSE)), false);
    FunctionTestUtil.assertResult(allFunction.invoke(Arrays.asList(Boolean.TRUE, null, Boolean.FALSE)), false);
}
/**
 * Adds or overwrites the given properties on the namespace; existing keys not
 * present in {@code properties} are left untouched.
 *
 * @return true if the property update was applied
 */
public boolean setProperties(Namespace namespace, Map<String, String> properties) {
    return updateProperties(namespace, props -> props.putAll(properties));
}
// setProperties must merge into the existing namespace properties (k1 overwritten,
// k2 added) and produce a commit whose metadata carries the catalog options.
@Test
public void testSetProperties() throws NessieConflictException, NessieNotFoundException {
    String branch = "setPropertiesBranch";
    createBranch(branch);
    Map<String, String> catalogOptions =
        Map.of(
            CatalogProperties.USER, "iceberg-user",
            CatalogProperties.APP_ID, "iceberg-nessie");
    NessieIcebergClient client = new NessieIcebergClient(api, branch, null, catalogOptions);
    Namespace ns = Namespace.of("a");
    client.createNamespace(ns, Map.of("k1", "v1a"));
    assertThat(client.setProperties(ns, Map.of("k1", "v1b", "k2", "v2"))).isTrue();
    // k1 was overwritten, k2 added — exactly two properties remain.
    assertThat(client.loadNamespaceMetadata(ns))
        .hasSize(2)
        .containsEntry("k1", "v1b")
        .containsEntry("k2", "v2");
    List<LogResponse.LogEntry> entries = api.getCommitLog().refName(branch).get().getLogEntries();
    // Inspect the first log entry (the update-namespace commit).
    assertThat(entries)
        .isNotEmpty()
        .first()
        .extracting(LogResponse.LogEntry::getCommitMeta)
        .satisfies(
            meta -> {
                assertThat(meta.getMessage()).contains("update namespace a");
                assertThat(meta.getAuthor()).isEqualTo("iceberg-user");
                assertThat(meta.getProperties())
                    .containsEntry(NessieUtil.APPLICATION_TYPE, "iceberg")
                    .containsEntry(CatalogProperties.APP_ID, "iceberg-nessie");
            });
}
/**
 * Splits a collection into consecutive sublists of at most {@code partitionSize}
 * elements.
 *
 * @return {@code null} for a null input, an empty list for an empty input, a
 *         singleton list when everything fits into one partition, otherwise the
 *         list of partitions
 */
public static <T> List<List<T>> partition(Collection<T> values, int partitionSize) {
    if (values == null) {
        return null;
    }
    if (values.isEmpty()) {
        return Collections.emptyList();
    }
    // Whole collection fits in a single partition: skip the splitting pass.
    if (values.size() <= partitionSize) {
        List<T> single = values instanceof List ? (List<T>) values : new ArrayList<>(values);
        return Collections.singletonList(single);
    }
    List<List<T>> partitions = new ArrayList<>();
    consumePartitions(values, partitionSize, partitions::add);
    return partitions;
}
// A null input collection yields null (not an empty list).
@Test
void partitionNullCollection() {
    assertThat(CollectionUtil.partition(null, 10)).isNull();
}
/**
 * Lazily reads a sequence of JSON files and exposes their rows as ColumnarBatch
 * instances. Files are pulled from {@code scanFileIter} one at a time; the parsed
 * JSON lines of the current file are buffered in {@code currentReadJsonList} and
 * consumed in batches of up to {@code maxBatchSize} rows.
 */
@Override
public CloseableIterator<ColumnarBatch> readJsonFiles(
        CloseableIterator<FileStatus> scanFileIter,
        StructType physicalSchema,
        Optional<Predicate> predicate) {
    return new CloseableIterator<>() {
        private String currentFile;
        // index of the current line being read from the current read json list, -1 means no line is read yet
        private int currentReadLine = -1;
        private List<JsonNode> currentReadJsonList = Lists.newArrayList();

        @Override
        public void close() {
            Utils.closeCloseables(scanFileIter);
            currentReadLine = -1;
            currentReadJsonList = null;
        }

        @Override
        public boolean hasNext() {
            if (hasNextToConsume()) {
                return true; // we have un-consumed last read line
            }
            // There is no file in reading or the current file being read has no more data
            // initialize the next file reader or return false if there are no more files to
            // read.
            try {
                tryGetNextFileJson();
            } catch (Exception ex) {
                throw new KernelEngineException(
                        format("Error reading JSON file: %s", currentFile), ex);
            }
            return hasNextToConsume();
        }

        // True while an already-parsed line of the current file is still unconsumed.
        private boolean hasNextToConsume() {
            return currentReadLine != -1 &&
                    !currentReadJsonList.isEmpty() &&
                    currentReadLine < currentReadJsonList.size();
        }

        @Override
        public ColumnarBatch next() {
            try (Timer ignored = Tracers.watchScope(Tracers.get(), EXTERNAL, "DeltaLakeJsonHandler.JsonToColumnarBatch")) {
                if (!hasNextToConsume()) {
                    throw new NoSuchElementException();
                }
                List<Row> rows = new ArrayList<>();
                int currentBatchSize = 0;
                // Drain buffered lines into rows; hasNext() may advance to the next file
                // mid-batch, so one batch can span multiple input files.
                do {
                    // hasNext already reads the next file and keeps it in member variable `cachedJsonList`
                    JsonNode jsonNode = currentReadJsonList.get(currentReadLine);
                    Row row = new io.delta.kernel.defaults.internal.data.DefaultJsonRow(
                            (ObjectNode) jsonNode, physicalSchema);
                    rows.add(row);
                    currentBatchSize++;
                    currentReadLine++;
                } while (currentBatchSize < maxBatchSize && hasNext());
                return new io.delta.kernel.defaults.internal.data.DefaultRowBasedColumnarBatch(
                        physicalSchema, rows);
            }
        }

        // Advances to the next file (if any), parsing its JSON content into
        // currentReadJsonList and resetting the read cursor.
        private void tryGetNextFileJson() throws ExecutionException, IOException {
            if (scanFileIter.hasNext()) {
                currentFile = scanFileIter.next().getPath();
                Path filePath = new Path(currentFile);
                if (filePath.getName().equals(LAST_CHECKPOINT_FILE_NAME)) {
                    // can not read last_checkpoint file from cache
                    // (presumably because that file is rewritten in place — TODO confirm)
                    currentReadJsonList = readJsonFile(currentFile, hadoopConf);
                } else {
                    currentReadJsonList = jsonCache.get(currentFile);
                }
                currentReadLine = 0;
            }
        }
    };
}
// Reading _last_checkpoint must bypass the JSON cache (cache stays empty afterwards)
// and still yield the single checkpoint row.
@Test
public void testReadLastCheckPoint() {
    String path = deltaLakePath + "/_last_checkpoint";
    DeltaLakeJsonHandler deltaLakeJsonHandler = new DeltaLakeJsonHandler(hdfsConfiguration, jsonCache);
    StructType readSchema = CheckpointMetaData.READ_SCHEMA;
    FileStatus fileStatus = FileStatus.of(path, 0, 0);
    try (CloseableIterator<ColumnarBatch> jsonIter = deltaLakeJsonHandler.readJsonFiles(
            Utils.singletonCloseableIterator(fileStatus), readSchema, Optional.empty())) {
        Optional<Row> checkpointRow = InternalUtils.getSingularRow(jsonIter);
        Assert.assertTrue(checkpointRow.isPresent());
        Row row = checkpointRow.get();
        // Column 0 is the checkpoint version recorded in the fixture.
        Assert.assertEquals(row.getLong(0), 30);
    } catch (IOException e) {
        Assert.fail();
    }
    // The last-checkpoint read must never populate the cache.
    Assert.assertTrue(jsonCache.asMap().isEmpty());
}
/**
 * Logs an INFO-level message, formatting {@code text} with {@code args}, and
 * mirrors it into the in-memory log buffer. Does nothing when the backing
 * logger does not support the INFO level.
 */
public static synchronized void i(final String tag, String text, Object... args) {
    if (!msLogger.supportsI()) {
        return;
    }
    final String msg = getFormattedString(text, args);
    msLogger.i(tag, msg);
    addLog(LVL_I, tag, msg);
}
// i() must format the message with the given args before delegating, and pass
// through unformatted text unchanged.
@Test
public void testI() throws Exception {
    Logger.i("mTag", "Text with %d digits", 0);
    Mockito.verify(mMockLog).i("mTag", "Text with 0 digits");
    Logger.i("mTag", "Text with no digits");
    Mockito.verify(mMockLog).i("mTag", "Text with no digits");
}
/**
 * FUSE create(): creates/opens the file at {@code path} for writing. O_WRONLY is
 * forced into the open flags before delegating; the flags originally passed by
 * the caller are preserved for logging.
 */
@Override
public int create(String path, long mode, FuseFileInfo fi) {
    final int callerFlags = fi.flags.get();
    // Ensure the handle is writable regardless of the caller's flags.
    fi.flags.set(callerFlags | OpenFlags.O_WRONLY.intValue());
    return AlluxioFuseUtils.call(LOG,
        () -> createOrOpenInternal(path, fi, mode),
        "Fuse.Create", "path=%s,mode=%o,flags=0x%x", path, mode, callerFlags);
}
// create() first checks for an existing file, then creates it with the given mode.
// Currently ignored pending a Dora-related fix (see @DoraTestTodoItem).
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu")
@Ignore
public void create() throws Exception {
    // "create" checks if the file already exists first
    when(mFileSystem.getStatus(any(AlluxioURI.class)))
        .thenThrow(mock(FileDoesNotExistException.class));
    mFileInfo.flags.set(O_WRONLY.intValue());
    mFuseFs.create("/foo/bar", 0, mFileInfo);
    AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
    // Mode 0 must be propagated into the create options.
    verify(mFileSystem).createFile(expectedPath, CreateFilePOptions.newBuilder()
        .setMode(new alluxio.security.authorization.Mode((short) 0).toProto())
        .build());
}
/**
 * Flushes the in-progress part of the upload and returns a committer that can
 * finalize it. The whole operation happens under the stream lock.
 *
 * @throws IOException if uploading the current part fails
 */
@Override
public Committer closeForCommit() throws IOException {
    lock();
    try {
        uploadCurrentPart();
        return upload.getCommitter();
    } finally {
        // Always release the lock, even when the upload throws.
        unlock();
    }
}
// Committing a stream with no data written must not create an (empty) object.
@Test
public void testNoDataWritten() throws IOException {
    RecoverableFsDataOutputStream.Committer committer = fsDataOutputStream.closeForCommit();
    committer.commit();
    // will not create empty object
    assertFalse(fs.exists(objectPath));
}
/**
 * Lists a directory. At the root, the Storegate root folders of type 0 ("My Files")
 * and 1 ("Common") are exposed as volumes; elsewhere the folder contents are paged
 * through the files API. The listener receives a chunk callback per page (and per
 * root entry), including for empty results.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    if(directory.isRoot()) {
        final AttributedList<Path> list = new AttributedList<>();
        for(RootFolder root : session.roots()) {
            switch(root.getRootFolderType()) {
                case 0:
                    // My Files
                case 1:
                    // Common
                    list.add(new Path(directory, PathNormalizer.name(root.getName()),
                        EnumSet.of(Path.Type.directory, Path.Type.volume), attributes.toAttributes(root)));
                    break;
            }
            listener.chunk(directory, list);
        }
        return list;
    }
    else {
        try {
            final AttributedList<Path> children = new AttributedList<>();
            int pageIndex = 0;
            int fileCount = 0;
            FileContents files;
            // Page through the folder until all rows reported by the server are read.
            do {
                files = new FilesApi(this.session.getClient()).filesGetById(
                    URIEncoder.encode(fileid.getFileId(directory)),
                    pageIndex, chunksize, "Name asc",
                    0, // All
                    true, false, false
                );
                for(File f : files.getFiles()) {
                    final PathAttributes attrs = attributes.toAttributes(f);
                    // Bit 0 of the flags marks a directory entry — TODO confirm against API docs.
                    final EnumSet<Path.Type> type = (f.getFlags() & 1) == 1
                        ? EnumSet.of(Path.Type.directory)
                        : EnumSet.of(Path.Type.file);
                    children.add(new Path(directory, f.getName(), type, attrs));
                }
                pageIndex++;
                fileCount += files.getFiles().size();
                listener.chunk(directory, children);
            }
            while(fileCount < files.getTotalRowCount());
            return children;
        }
        catch(ApiException e) {
            throw new StoregateExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory);
        }
    }
}
// Listing a freshly-created empty folder must return an empty list while still
// invoking the chunk callback (with a non-sentinel list) at least once.
@Test
public void testListEmptyFolder() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new Path("/My files", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final AtomicBoolean callback = new AtomicBoolean();
    assertTrue(new StoregateListService(session, nodeid).list(folder, new DisabledListProgressListener() {
        @Override
        public void chunk(final Path parent, final AttributedList<Path> list) {
            // The callback must never receive the shared EMPTY sentinel.
            assertNotSame(AttributedList.EMPTY, list);
            callback.set(true);
        }
    }).isEmpty());
    assertTrue(callback.get());
    // Clean up the folder created for this test.
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Serializes a Connect value via the underlying string serializer, using the
 * value's toString() representation. A null value serializes to null; serializer
 * failures are rethrown as DataException.
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    try {
        final String data = (value == null) ? null : value.toString();
        return serializer.serialize(topic, data);
    } catch (SerializationException e) {
        throw new DataException("Failed to serialize to a string: ", e);
    }
}
// A plain string must round-trip to its UTF-8 byte encoding.
@Test
public void testStringToBytes() {
    assertArrayEquals(Utils.utf8(SAMPLE_STRING), converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING));
}
/**
 * Returns the query text with sensitive literals masked. If parsing fails for
 * any reason — including parser StackOverflowError on pathological input — a
 * fallback masking path is used instead, so masking itself never throws.
 */
public static String getMaskedStatement(final String query) {
    try {
        final ParseTree tree = DefaultKsqlParser.getParseTree(query);
        return new Visitor().visit(tree);
    } catch (final Exception | StackOverflowError e) {
        // Deliberately broad catch: a failure to mask must not leak the raw query.
        return fallbackMasking(query);
    }
}
// CREATE SINK CONNECTOR statements must have every string property value masked
// as '[string]' except the connector class, with whitespace normalized.
@Test
public void shouldMaskSinkConnector() {
    // Given:
    final String query = "CREATE Sink CONNECTOR `test-connector` WITH ("
        + " \"connector.class\" = 'PostgresSource', \n"
        + " 'connection.url' = 'jdbc:postgresql://localhost:5432/my.db',\n"
        + " \"mode\"='bulk',\n"
        + " \"topic.prefix\"='jdbc-',\n"
        + " \"table.whitelist\"='users',\n"
        + " \"key\"='username');";

    // When
    final String maskedQuery = QueryMask.getMaskedStatement(query);

    // Then
    final String expected = "CREATE SINK CONNECTOR `test-connector` WITH "
        + "(\"connector.class\"='PostgresSource', "
        + "'connection.url'='[string]', "
        + "\"mode\"='[string]', "
        + "\"topic.prefix\"='[string]', "
        + "\"table.whitelist\"='[string]', "
        + "\"key\"='[string]');";
    assertThat(maskedQuery, is(expected));
}
/**
 * Formats the source read from {@code input} and writes the result to
 * {@code output}. Currently reads the whole input into memory before formatting.
 *
 * @throws FormatterException if the source cannot be parsed/formatted
 * @throws IOException if reading or writing fails
 */
public void formatSource(CharSource input, CharSink output) throws FormatterException, IOException {
    // TODO(cushon): proper support for streaming input/output. Input may
    // not be feasible (parsing) but output should be easier.
    output.write(formatSource(input.read()));
}
// Trailing tabs after a line comment must be stripped by the formatter.
@Test
public void removeTrailingTabsInComments() throws Exception {
    assertThat(
        new Formatter()
            .formatSource(
                "class Foo {\n"
                    + " void f() {\n"
                    + " int x = 0; // comment\t\t\t\n"
                    + " return;\n"
                    + " }\n"
                    + "}\n"))
        .isEqualTo(
            "class Foo {\n"
                + " void f() {\n"
                + " int x = 0; // comment\n"
                + " return;\n"
                + " }\n"
                + "}\n");
}
/**
 * Returns the shared, statically-initialized dispatcher metadata for this handler.
 */
public static NotificationDispatcherMetadata newMetadata() {
    return METADATA;
}
// The "changes on my issues" notification must be enabled per-project.
@Test
public void changeOnMyIssues_notification_is_enable_at_project_level() {
    NotificationDispatcherMetadata metadata = ChangesOnMyIssueNotificationHandler.newMetadata();
    assertThat(metadata.getProperty(PER_PROJECT_NOTIFICATION)).isEqualTo("true");
}
/**
 * REST endpoint returning history server info, as UTF-8 JSON or XML depending
 * on content negotiation.
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public HistoryInfo get() {
    return getHistoryInfo();
}
// GET /ws/v1/history/info must answer with UTF-8 XML matching the app context.
@Test
public void testInfoXML() throws JSONException, Exception {
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("info/").accept(MediaType.APPLICATION_XML)
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
        response.getType().toString());
    String xml = response.getEntity(String.class);
    verifyHSInfoXML(xml, appContext);
}
/**
 * Creates a deep copy of this object: every member element is itself deep-copied,
 * so later mutations of the copy and the original are fully independent.
 */
@Override
public JsonObject deepCopy() {
    final JsonObject copy = new JsonObject();
    // forEach preserves the members map's iteration order, same as entrySet().
    members.forEach((name, element) -> copy.add(name, element.deepCopy()));
    return copy;
}
// Mutating a nested array in the original after deepCopy() must not affect the copy.
@Test
public void testDeepCopy() {
    JsonObject original = new JsonObject();
    JsonArray firstEntry = new JsonArray();
    original.add("key", firstEntry);
    JsonObject copy = original.deepCopy();
    // Mutate the original's nested array after copying.
    firstEntry.add(new JsonPrimitive("z"));
    assertThat(original.get("key").getAsJsonArray()).hasSize(1);
    assertThat(copy.get("key").getAsJsonArray()).hasSize(0);
}
/**
 * Converts the data table to the requested type without transposing
 * (delegates with transposed=false).
 */
@Override
public <T> T convert(DataTable dataTable, Type type) {
    return convert(dataTable, type, false);
}
// A table whose first column is a key and remaining cells are dates must convert
// to Map<String, List<Date>> once a Date cell transformer is registered.
@Test
void convert_to_map_of_primitive_to_list_of_object() throws ParseException {
    DataTable table = parse("",
        " | Annie M. G. | 1995-03-21 | 1911-03-20 |",
        " | Roald | 1990-09-13 | 1916-09-13 |",
        " | Astrid | 1907-10-14 | 1907-11-14 |");

    Map<String, List<Date>> expected = new HashMap<String, List<Date>>() {
        {
            put("Annie M. G.", asList(SIMPLE_DATE_FORMAT.parse("1995-03-21"), SIMPLE_DATE_FORMAT.parse("1911-03-20")));
            put("Roald", asList(SIMPLE_DATE_FORMAT.parse("1990-09-13"), SIMPLE_DATE_FORMAT.parse("1916-09-13")));
            put("Astrid", asList(SIMPLE_DATE_FORMAT.parse("1907-10-14"), SIMPLE_DATE_FORMAT.parse("1907-11-14")));
        }
    };

    // Register the cell-level Date transformer the conversion relies on.
    registry.defineDataTableType(DATE_TABLE_CELL_TRANSFORMER);
    assertEquals(expected, converter.convert(table, MAP_OF_STRING_TO_LIST_OF_DATE));
}
/**
 * Parses a B3 single-header value, scanning the entire sequence.
 * Convenience overload of the (begin, end) range variant; may return null
 * (see {@code @Nullable}) when the range overload does.
 */
@Nullable
public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) {
    return parseB3SingleFormat(b3, 0, b3.length());
}
// An all-zero parent id must parse to the same context as having no parent id at all.
@Test
void parseB3SingleFormat_zero_parentId() {
    assertThat(parseB3SingleFormat(traceId + "-" + spanId + "-1-0000000000000000").context())
        .isEqualToComparingFieldByField(
            parseB3SingleFormat(traceId + "-" + spanId + "-1").context()
        );
}
/**
 * REST endpoint fetching a saved search by id, scoped to what the calling user
 * may see.
 *
 * @throws NotFoundException when no matching search is visible to the user
 */
@GET
@ApiOperation(value = "Retrieve a search query")
@Path("{id}")
@Produces({MediaType.APPLICATION_JSON, SEARCH_FORMAT_V1})
public SearchDTO getSearch(@ApiParam(name = "id") @PathParam("id") String searchId,
                           @Context SearchUser searchUser) {
    final Search search = searchDomain.getForUser(searchId, searchUser)
        .orElseThrow(() -> new NotFoundException("Search with id " + searchId + " does not exist"));
    return SearchDTO.fromSearch(search);
}
// When the domain yields no search, the endpoint must throw NotFoundException
// whose message contains the requested id.
@Test
public void getSearchThrowsNotFoundIfSearchDoesntExist() {
    final SearchDomain searchDomain = mockSearchDomain(Optional.empty());
    final SearchResource resource = new SearchResource(searchDomain, searchExecutor, searchJobService, eventBus, clusterConfigService);

    assertThatExceptionOfType(NotFoundException.class)
        .isThrownBy(() -> resource.getSearch("god", searchUser))
        .withMessageContaining("god");
}
/**
 * Looks up windowed rows for a key within the given window-start and window-end
 * bounds via an interactive query against the state store. The store is queried
 * with a start-time range derived from both bounds; results are then filtered
 * again so that both the window start and the derived window end fall inside
 * their respective bounds.
 *
 * @throws MaterializationException on query failure
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
    try {
        // Translate the two bound ranges into a single window-start scan range.
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
            WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
        StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
            inStore(stateStore.getStateStoreName()).withQuery(query);
        if (position.isPresent()) {
            // Bound the query so it only reads state at/after the given position.
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final KafkaStreams streams = stateStore.getKafkaStreams();
        final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
            streams.query(request);
        final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
            result.getPartitionResults().get(partition);
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        }
        if (queryResult.getResult() == null) {
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        }
        try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
            final Builder<WindowedRow> builder = ImmutableList.builder();
            while (it.hasNext()) {
                final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
                final Instant windowStart = Instant.ofEpochMilli(next.key);
                // The scan range was only an approximation; re-check both bounds.
                if (!windowStartBounds.contains(windowStart)) {
                    continue;
                }
                final Instant windowEnd = windowStart.plus(windowSize);
                if (!windowEndBounds.contains(windowEnd)) {
                    continue;
                }
                final TimeWindow window =
                    new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
                final WindowedRow row = WindowedRow.of(
                    stateStore.schema(),
                    new Windowed<>(key, window),
                    next.value.value(),
                    next.value.timestamp()
                );
                builder.add(row);
            }
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                builder.build().iterator(), queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        // Already meaningful — propagate unchanged.
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
// A store-level failure during the fetch must surface as a MaterializationException
// (here the timeout subtype), with the underlying message intact.
@Test
public void shouldThrowIfStoreFetchFails_fetchAll() {
    // Given:
    when(kafkaStreams.query(any()))
        .thenThrow(new MaterializationTimeOutException("Boom"));

    // When:
    final Exception e = assertThrows(
        MaterializationException.class,
        () -> table.get(PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Boom"));
    assertThat(e, (instanceOf(MaterializationTimeOutException.class)));
}
/**
 * Parses the query and prepares it for execution. If parsing emitted warnings
 * and the session is configured to treat warnings as errors, the warnings are
 * joined into a single WARNING_AS_ERROR failure instead.
 */
@Override
public BuiltInPreparedQuery prepareQuery(AnalyzerOptions analyzerOptions, String query,
        Map<String, String> preparedStatements, WarningCollector warningCollector) {
    Statement wrappedStatement = sqlParser.createStatement(query, createParsingOptions(analyzerOptions));
    if (warningCollector.hasWarnings() && analyzerOptions.getWarningHandlingLevel() == AS_ERROR) {
        // Escalate all collected warnings into one exception, one warning per line.
        throw new PrestoException(WARNING_AS_ERROR, format("Warning handling level set to AS_ERROR. Warnings: %n %s",
            warningCollector.getWarnings().stream()
                .map(PrestoWarning::getMessage)
                .collect(joining(System.lineSeparator()))));
    }
    return prepareQuery(analyzerOptions, wrappedStatement, preparedStatements);
}
// EXECUTE with more USING parameters than the prepared statement has placeholders
// must fail with INVALID_PARAMETER_USAGE.
@Test
public void testTooManyParameters() {
    try {
        Map<String, String> preparedStatements = ImmutableMap.of("my_query", "SELECT * FROM foo where col1 = ?");
        QUERY_PREPARER.prepareQuery(testAnalyzerOptions, "EXECUTE my_query USING 1,2", preparedStatements, WarningCollector.NOOP);
        fail("expected exception");
    } catch (SemanticException e) {
        assertEquals(e.getCode(), INVALID_PARAMETER_USAGE);
    }
}
/**
 * Persists the session reactively. If the session id has been queued for
 * invalidation, saving turns into a delete so a late save cannot resurrect it;
 * otherwise the parent store is updated and then the secondary index refreshed.
 */
@Override
public Mono<Void> save(MapSession session) {
    if (invalidateSessionIds.getIfPresent(session.getId()) != null) {
        return this.deleteById(session.getId());
    }
    // Index update runs only after the save itself completes.
    return super.save(session)
        .then(updateIndex(session));
}
// Saving a session must populate both directions of the session<->index mapping.
@Test
void saveTest() {
    var indexKey = createSession("fake-session-1", "test");
    // session id -> index keys
    assertThat(sessionRepository.getSessionIdIndexMap()).hasSize(1);
    assertThat(
        sessionRepository.getSessionIdIndexMap().containsValue(Set.of(indexKey))).isTrue();
    // index key -> session ids
    assertThat(sessionRepository.getIndexSessionIdMap()).hasSize(1);
    assertThat(sessionRepository.getIndexSessionIdMap().containsKey(indexKey)).isTrue();
    assertThat(sessionRepository.getIndexSessionIdMap().get(indexKey)).isEqualTo(
        Set.of("fake-session-1"));
}
/**
 * Decodes an ABI-encoded function result into typed values, delegating to the
 * configured decoder.
 */
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
    return decoder.decodeFunctionResult(rawInput, outputParameters);
}
// A dynamic array containing one dynamic struct (Foo{id, name}) must decode from
// its ABI v2 encoding: outer offset, length 1, element offset, then two offset
// string fields ("id", "name").
@Test
@SuppressWarnings("unchecked")
public void testDecodeDynamicStructDynamicArray() {
    String rawInput =
        "0x0000000000000000000000000000000000000000000000000000000000000020"
            + "0000000000000000000000000000000000000000000000000000000000000001"
            + "0000000000000000000000000000000000000000000000000000000000000020"
            + "0000000000000000000000000000000000000000000000000000000000000040"
            + "0000000000000000000000000000000000000000000000000000000000000080"
            + "0000000000000000000000000000000000000000000000000000000000000002"
            + "6964000000000000000000000000000000000000000000000000000000000000"
            + "0000000000000000000000000000000000000000000000000000000000000004"
            + "6e616d6500000000000000000000000000000000000000000000000000000000";

    assertEquals(
        FunctionReturnDecoder.decode(
            rawInput,
            AbiV2TestFixture.getFooDynamicArrayFunction.getOutputParameters()),
        Arrays.asList(
            new DynamicArray(
                AbiV2TestFixture.Foo.class, new AbiV2TestFixture.Foo("id", "name"))));
}
/**
 * Creates a retriever that invokes the named Docker credential helper, resolving
 * the helper name/path string to a {@code Path} before delegating.
 */
public CredentialRetriever dockerCredentialHelper(String credentialHelper) {
    return dockerCredentialHelper(Paths.get(credentialHelper));
}
// The retriever must invoke the helper for the registry with no extra environment
// and log which helper was chosen.
@Test
public void testDockerCredentialHelper() throws CredentialRetrievalException {
    CredentialRetrieverFactory credentialRetrieverFactory =
        createCredentialRetrieverFactory("registry", "repo");

    Assert.assertEquals(
        Optional.of(FAKE_CREDENTIALS),
        credentialRetrieverFactory
            .dockerCredentialHelper(Paths.get("docker-credential-foo"))
            .retrieve());
    // Helper invoked with the registry, the helper path, and an empty environment.
    Mockito.verify(mockDockerCredentialHelperFactory)
        .create("registry", Paths.get("docker-credential-foo"), Collections.emptyMap());
    Mockito.verify(mockLogger)
        .accept(
            LogEvent.lifecycle("Using credential helper docker-credential-foo for registry/repo"));
}
/**
 * Parses a standalone connector configuration file, trying three formats in order:
 * (1) a flat JSON map of string properties, (2) a full CreateConnectorRequest JSON
 * object, (3) a Java properties file. In every case the 'name' configuration must
 * be present (or derivable from the request's top-level name) and consistent.
 *
 * @throws ConnectException if 'name' is missing or inconsistent
 * @throws IOException if the file cannot be read
 */
CreateConnectorRequest parseConnectorConfigurationFile(String filePath) throws IOException {
    ObjectMapper objectMapper = new ObjectMapper();
    File connectorConfigurationFile = Paths.get(filePath).toFile();
    // Attempt 1: flat JSON map of String -> String.
    try {
        Map<String, String> connectorConfigs = objectMapper.readValue(
            connectorConfigurationFile,
            new TypeReference<Map<String, String>>() { });
        if (!connectorConfigs.containsKey(NAME_CONFIG)) {
            throw new ConnectException("Connector configuration at '" + filePath + "' is missing the mandatory '" + NAME_CONFIG + "' "
                + "configuration");
        }
        return new CreateConnectorRequest(connectorConfigs.get(NAME_CONFIG), connectorConfigs, null);
    } catch (StreamReadException | DatabindException e) {
        log.debug("Could not parse connector configuration file '{}' into a Map with String keys and values", filePath);
    }

    // Attempt 2: full CreateConnectorRequest object; unknown fields are tolerated
    // from here on (the mapper is reconfigured before this read).
    try {
        objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        CreateConnectorRequest createConnectorRequest = objectMapper.readValue(connectorConfigurationFile,
            new TypeReference<CreateConnectorRequest>() { });
        // NOTE(review): assumes config() is non-null for a parsed request — TODO confirm.
        if (createConnectorRequest.config().containsKey(NAME_CONFIG)) {
            if (!createConnectorRequest.config().get(NAME_CONFIG).equals(createConnectorRequest.name())) {
                throw new ConnectException("Connector name configuration in 'config' doesn't match the one specified in 'name' at '" + filePath
                    + "'");
            }
        } else {
            // Propagate the top-level name into the config map.
            createConnectorRequest.config().put(NAME_CONFIG, createConnectorRequest.name());
        }
        return createConnectorRequest;
    } catch (StreamReadException | DatabindException e) {
        log.debug("Could not parse connector configuration file '{}' into an object of type {}",
            filePath, CreateConnectorRequest.class.getSimpleName());
    }

    // Attempt 3: Java properties format.
    Map<String, String> connectorConfigs = Utils.propsToStringMap(Utils.loadProps(filePath));
    if (!connectorConfigs.containsKey(NAME_CONFIG)) {
        throw new ConnectException("Connector configuration at '" + filePath + "' is missing the mandatory '" + NAME_CONFIG + "' "
            + "configuration");
    }
    return new CreateConnectorRequest(connectorConfigs.get(NAME_CONFIG), connectorConfigs, null);
}
// A Java properties file (the third parsing fallback) must yield a request with
// the configured name, the full config map, and no initial state.
@Test
public void testParseJavaPropertiesFile() throws Exception {
    Properties properties = new Properties();
    CONNECTOR_CONFIG.forEach(properties::setProperty);
    try (FileWriter writer = new FileWriter(connectorConfigurationFile)) {
        properties.store(writer, null);
    }
    CreateConnectorRequest request = connectStandalone.parseConnectorConfigurationFile(connectorConfigurationFile.getAbsolutePath());
    assertEquals(CONNECTOR_NAME, request.name());
    assertEquals(CONNECTOR_CONFIG, request.config());
    assertNull(request.initialState());
}
/**
 * Clusters the data with CLARANS using a default neighbor-examination budget of
 * 1.25% of k * (n - k), delegating to the four-argument overload.
 *
 * @param data     the observations
 * @param distance the distance measure
 * @param k        the number of clusters
 */
public static <T> CLARANS<T> fit(T[] data, Distance<T> distance, int k) {
    final int maxNeighbor = (int) Math.round(0.0125 * k * (data.length - k));
    return fit(data, distance, k, maxNeighbor);
}
// End-to-end CLARANS run on USPS digits with a fixed seed: checks clustering
// quality (Rand / adjusted Rand) on train and test sets and that the model
// survives a serialization round trip.
@Test
public void testUSPS() throws Exception {
    System.out.println("USPS");
    MathEx.setSeed(19650218); // to get repeatable results.

    double[][] x = USPS.x;
    int[] y = USPS.y;
    double[][] testx = USPS.testx;
    int[] testy = USPS.testy;

    CLARANS<double[]> model = CLARANS.fit(x, MathEx::squaredDistance,10);
    System.out.println(model);

    double r = RandIndex.of(y, model.y);
    double r2 = AdjustedRandIndex.of(y, model.y);
    System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
    // Expected quality with the fixed seed above.
    assertEquals(0.8935, r, 1E-4);
    assertEquals(0.4610, r2, 1E-4);

    System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y));
    System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y));
    System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y));
    System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y));
    System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y));
    System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y));

    // Predict the held-out test set and check clustering agreement there too.
    int[] p = new int[testx.length];
    for (int i = 0; i < testx.length; i++) {
        p[i] = model.predict(testx[i]);
    }

    r = RandIndex.of(testy, p);
    r2 = AdjustedRandIndex.of(testy, p);
    System.out.format("Testing rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
    assertEquals(0.8807, r, 1E-4);
    assertEquals(0.4016, r2, 1E-4);

    // Serialization round trip must not throw.
    java.nio.file.Path temp = Write.object(model);
    Read.object(temp);
}
/**
 * Creates a task that runs the two given tasks in parallel.
 *
 * @deprecated kept for backward compatibility; delegates directly to
 *             {@code vapar}.
 */
@Deprecated
public static <T> ParTask<T> par(Task<? extends T> task1, Task<? extends T> task2) {
    return vapar(task1, task2);
}
// 50 tasks each completing in ~5.5ms, each wrapped with a 50ms timeout and run in
// parallel on one queue: none of them should actually time out.
@Test
public void testManyTimeoutTaskWithoutTimeoutOnAQueue() throws InterruptedException, IOException {
    final String value = "value";
    final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    List<Task<String>> tasks = new ArrayList<Task<String>>();
    for (int i = 0; i < 50; i++) {
        // Task which simulates doing something for 0.5ms and setting response
        // asynchronously after 5ms.
        Task<String> t = new BaseTask<String>("test") {
            @Override
            protected Promise<? extends String> run(Context context) throws Throwable {
                final SettablePromise<String> result = Promises.settable();
                Thread.sleep(0, 500000);
                scheduler.schedule(new Runnable() {
                    @Override
                    public void run() {
                        result.done(value);
                    }
                }, 5, TimeUnit.MILLISECONDS);
                return result;
            }
        };
        tasks.add(t.withTimeout(50, TimeUnit.MILLISECONDS));
    }

    // final task runs all the tasks in parallel
    final Task<?> timeoutTask = Tasks.par(tasks);
    runAndWait("TestTasks.testManyTimeoutTaskWithoutTimeoutOnAQueue", timeoutTask);
    scheduler.shutdown();
    //tasks should not time out
    assertEquals(false, timeoutTask.isFailed());
}