focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Creates a symmetric {@code JoinWindows}: records join when their timestamps differ
 * by at most {@code timeDifference}, and late records are accepted for up to
 * {@code afterWindowEnd} past the window end.
 *
 * @param timeDifference maximum timestamp difference between joining records;
 *                       must be a non-negative whole-millisecond duration
 * @param afterWindowEnd grace period after window end; must be a non-negative
 *                       whole-millisecond duration
 * @return a join window with the same difference applied before and after each record
 */
public static JoinWindows ofTimeDifferenceAndGrace(final Duration timeDifference, final Duration afterWindowEnd) {
    // Validate each duration with an error-message prefix naming the offending argument.
    final String diffPrefix = prepareMillisCheckFailMsgPrefix(timeDifference, "timeDifference");
    final long diffMs = validateMillisecondDuration(timeDifference, diffPrefix);

    final String gracePrefix = prepareMillisCheckFailMsgPrefix(afterWindowEnd, "afterWindowEnd");
    final long graceMs = validateMillisecondDuration(afterWindowEnd, gracePrefix);

    // Symmetric window: identical before/after difference.
    return new JoinWindows(diffMs, diffMs, graceMs, true);
}
// A zero grace period is valid; a negative one must be rejected with
// IllegalArgumentException.
@Test
public void gracePeriodShouldEnforceBoundaries() {
    JoinWindows.ofTimeDifferenceAndGrace(ofMillis(3L), ofMillis(0L));
    try {
        JoinWindows.ofTimeDifferenceAndGrace(ofMillis(3L), ofMillis(-1L));
        fail("should not accept negatives");
    } catch (final IllegalArgumentException e) {
        //expected
    }
}
/**
 * Handles an UNLOCK_BATCH_MQ request: releases the queue locks held by a consumer
 * group/client. In strict lock mode the unlock is additionally fanned out to every
 * broker in the member group; otherwise it is applied locally only.
 *
 * @param ctx     channel context of the requesting client (unused here)
 * @param request the incoming remoting command; its body decodes to {@code UnlockBatchRequestBody}
 * @return a SUCCESS response regardless of remote fan-out outcomes
 */
private RemotingCommand unlockBatchMQ(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    UnlockBatchRequestBody requestBody = UnlockBatchRequestBody.decode(request.getBody(), UnlockBatchRequestBody.class);

    if (requestBody.isOnlyThisBroker() || !this.brokerController.getBrokerConfig().isLockInStrictMode()) {
        // Local-only unlock: either the client requested it, or strict mode is off.
        this.brokerController.getRebalanceLockManager().unlockBatch(
            requestBody.getConsumerGroup(),
            requestBody.getMqSet(),
            requestBody.getClientId());
    } else {
        // Strict mode: forward the unlock to every broker in the member group.
        // Marking the forwarded request as "only this broker" prevents re-forwarding loops.
        requestBody.setOnlyThisBroker(true);
        BrokerMemberGroup memberGroup = this.brokerController.getBrokerMemberGroup();
        if (memberGroup != null) {
            Map<Long, String> addrMap = memberGroup.getBrokerAddrs();
            for (Long brokerId : addrMap.keySet()) {
                try {
                    this.brokerController.getBrokerOuterAPI().unlockBatchMQAsync(addrMap.get(brokerId),
                        requestBody, 1000, new UnlockCallback() {
                            @Override
                            public void onSuccess() {
                            }

                            @Override
                            public void onException(Throwable e) {
                                LOGGER.warn("unlockBatchMQ exception on {}, {}", addrMap.get(brokerId), e);
                            }
                        });
                } catch (Exception e) {
                    // Best-effort fan-out: a failure on one broker must not abort the rest.
                    LOGGER.warn("unlockBatchMQ exception on {}, {}", addrMap.get(brokerId), e);
                }
            }
        }
    }

    // Always SUCCESS: remote unlocks are fire-and-forget.
    response.setCode(ResponseCode.SUCCESS);
    response.setRemark(null);
    return response;
}
// Drives the UNLOCK_BATCH_MQ path through the processor with a minimal request
// body and asserts a SUCCESS response code.
@Test
public void testUnlockBatchMQ() throws Exception {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UNLOCK_BATCH_MQ, null);
    UnlockBatchRequestBody unlockBatchRequestBody = new UnlockBatchRequestBody();
    unlockBatchRequestBody.setClientId("11111");
    unlockBatchRequestBody.setConsumerGroup("group");
    request.setBody(JSON.toJSON(unlockBatchRequestBody).toString().getBytes());
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
public static boolean hasFreeSpace(final Path path, final long size) { final long freeSpace = path.toFile().getFreeSpace(); if (freeSpace == 0L && IS_WINDOWS) { // On Windows, SUBST'ed drives report 0L from getFreeSpace(). // The API doc says "The number of unallocated bytes on the partition or 0L if the abstract pathname does not name a partition." // There is no straightforward fix for this and it seems a fix is included in Java 9. // One alternative is to launch and parse a DIR command and look at the reported free space. // This is a temporary fix to get the CI tests going which relies on SUBST'ed drives to manage long paths. logger.warn("Cannot retrieve free space on " + path.toString() + ". This is probably a SUBST'ed drive."); return true; } return freeSpace >= size; }
// A fresh temp folder's partition should always have 1 KiB free.
@Test
public void trueIfEnoughSpace() throws Exception {
    MatcherAssert.assertThat(
        FsUtil.hasFreeSpace(temp.newFolder().toPath().toAbsolutePath(), 1024L),
        CoreMatchers.is(true)
    );
}
/**
 * Bypasses the dependencies of a step in the latest run of the given workflow
 * instance: looks up the latest run via the instance DAO, then delegates the
 * bypass action to the action DAO.
 *
 * @param workflowId         workflow identifier
 * @param workflowInstanceId workflow instance identifier
 * @param stepId             step whose dependencies are bypassed
 * @param user               user performing the action
 * @param blocking           whether to wait for the action to complete
 * @return the action response from the action DAO
 */
public StepInstanceActionResponse bypassStepDependencies(
    String workflowId, long workflowInstanceId, String stepId, User user, boolean blocking) {
  WorkflowInstance instance = instanceDao.getLatestWorkflowInstanceRun(workflowId, workflowInstanceId);
  return actionDao.bypassStepDependencies(instance, stepId, user, blocking);
}
// Verifies the handler resolves the latest instance run and forwards the
// bypass call to the action DAO exactly once with the same arguments.
@Test
public void testBypassDependencies() {
    when(instance.getStatus()).thenReturn(WorkflowInstance.Status.IN_PROGRESS);
    stepActionHandler.bypassStepDependencies("sample-minimal-wf", 1, "job1", user, true);
    verify(actionDao, times(1)).bypassStepDependencies(instance, "job1", user, true);
}
/**
 * Returns partition info (essentially last-updated timestamps) for the requested
 * partition names of an Iceberg table, by scanning the metadata PARTITIONS table.
 * Unpartitioned tables yield a single entry; when the owning snapshot has expired
 * the last-updated time falls back to -1.
 */
@Override
public List<PartitionInfo> getPartitions(Table table, List<String> partitionNames) {
    Map<String, Partition> partitionMap = Maps.newHashMap();
    IcebergTable icebergTable = (IcebergTable) table;
    PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils.
        createMetadataTableInstance(icebergTable.getNativeTable(),
            org.apache.iceberg.MetadataTableType.PARTITIONS);
    if (icebergTable.isUnPartitioned()) {
        try (CloseableIterable<FileScanTask> tasks = partitionsTable.newScan().planFiles()) {
            for (FileScanTask task : tasks) {
                // partitionsTable Table schema :
                // record_count,
                // file_count,
                // total_data_file_size_in_bytes,
                // position_delete_record_count,
                // position_delete_file_count,
                // equality_delete_record_count,
                // equality_delete_file_count,
                // last_updated_at,
                // last_updated_snapshot_id
                CloseableIterable<StructLike> rows = task.asDataTask().rows();
                for (StructLike row : rows) {
                    // Get the last updated time of the table according to the table schema
                    long lastUpdated = -1;
                    try {
                        lastUpdated = row.get(7, Long.class);
                    } catch (NullPointerException e) {
                        // last_updated_at is null when the snapshot expired; keep -1.
                        // NOTE(review): message has two placeholders but three args
                        // (db, table, exception) — the "snapshot" slot prints the table
                        // name; likely a copy/paste slip worth fixing.
                        LOG.error("The table [{}] snapshot [{}] has been expired",
                            icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), e);
                    }
                    Partition partition = new Partition(lastUpdated);
                    // Unpartitioned: a single logical partition — return on the first row.
                    return ImmutableList.of(partition);
                }
            }
            // for empty table, use -1 as last updated time
            return ImmutableList.of(new Partition(-1));
        } catch (IOException e) {
            throw new StarRocksConnectorException("Failed to get partitions for table: " + table.getName(), e);
        }
    } else {
        // For partition table, we need to get all partitions from PartitionsTable.
        try (CloseableIterable<FileScanTask> tasks = partitionsTable.newScan().planFiles()) {
            for (FileScanTask task : tasks) {
                // partitionsTable Table schema :
                // partition,
                // spec_id,
                // record_count,
                // file_count,
                // total_data_file_size_in_bytes,
                // position_delete_record_count,
                // position_delete_file_count,
                // equality_delete_record_count,
                // equality_delete_file_count,
                // last_updated_at,
                // last_updated_snapshot_id
                CloseableIterable<StructLike> rows = task.asDataTask().rows();
                for (StructLike row : rows) {
                    // Get the partition data/spec id/last updated time according to the table schema
                    StructProjection partitionData = row.get(0, StructProjection.class);
                    int specId = row.get(1, Integer.class);
                    PartitionSpec spec = icebergTable.getNativeTable().specs().get(specId);
                    String partitionName = PartitionUtil.convertIcebergPartitionToPartitionName(spec, partitionData);
                    long lastUpdated = -1;
                    try {
                        lastUpdated = row.get(9, Long.class);
                    } catch (NullPointerException e) {
                        // Expired snapshot: keep -1 as the last-updated marker.
                        LOG.error("The table [{}.{}] snapshot [{}] has been expired",
                            icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), partitionName, e);
                    }
                    Partition partition = new Partition(lastUpdated);
                    partitionMap.put(partitionName, partition);
                }
            }
        } catch (IOException e) {
            throw new StarRocksConnectorException("Failed to get partitions for table: " + table.getName(), e);
        }
    }
    // Build results in the caller's requested order; unknown names map to null entries.
    ImmutableList.Builder<PartitionInfo> partitions = ImmutableList.builder();
    partitionNames.forEach(partitionName -> partitions.add(partitionMap.get(partitionName)));
    return partitions.build();
}
// After expiring all snapshots, getPartitions must still return entries for both
// partitions and report -1 as the modified time for at least one of them.
@Test
public void testGetPartitionsWithExpireSnapshot() {
    mockedNativeTableB.newAppend().appendFile(FILE_B_1).commit();
    mockedNativeTableB.refresh();
    mockedNativeTableB.newAppend().appendFile(FILE_B_2).commit();
    mockedNativeTableB.refresh();
    mockedNativeTableB.expireSnapshots().expireOlderThan(System.currentTimeMillis()).commit();
    mockedNativeTableB.refresh();
    IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG);
    CachingIcebergCatalog cachingIcebergCatalog = new CachingIcebergCatalog(
        CATALOG_NAME, icebergHiveCatalog, DEFAULT_CATALOG_PROPERTIES, Executors.newSingleThreadExecutor());
    IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, cachingIcebergCatalog,
        Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
    IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db",
        "table", "", Lists.newArrayList(), mockedNativeTableB, Maps.newHashMap());
    List<PartitionInfo> partitions = metadata.getPartitions(icebergTable, ImmutableList.of("k2=2", "k2=3"));
    Assert.assertEquals(2, partitions.size());
    Assert.assertTrue(partitions.stream().anyMatch(x -> x.getModifiedTime() == -1));
}
/**
 * Parses a natural-language date expression relative to the current time.
 * Convenience overload delegating to {@code parse(String, Date)} with "now".
 *
 * @throws DateNotParsableException if the string cannot be interpreted
 */
public Result parse(final String string) throws DateNotParsableException {
    return this.parse(string, new Date());
}
// "last 4 hours" must span exactly 4 hours even across DST transitions in the
// Antarctica/Palmer zone (checked on both the spring and autumn change dates).
@Test
public void testLast4hoursArtificialReferenceDSTChange() throws Exception {
    DateTime reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss")
        .withZone(DateTimeZone.forID("Antarctica/Palmer")).parseDateTime("28.03.2021 03:45:23");
    NaturalDateParser.Result last4 = naturalDateParserAntarctica.parse("last 4 hours", reference.toDate());
    assertThat(last4.getFrom()).as("from should be exactly 4 hours in the past").isEqualTo(reference.minusHours(4));
    assertThat(last4.getTo()).as("to should be the reference date").isEqualTo(reference);
    reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss")
        .withZone(DateTimeZone.forID("Antarctica/Palmer")).parseDateTime("31.10.2021 03:45:23");
    last4 = naturalDateParserAntarctica.parse("last 4 hours", reference.toDate());
    assertThat(last4.getFrom()).as("from should be exactly 4 hours in the past").isEqualTo(reference.minusHours(4));
    assertThat(last4.getTo()).as("to should be the reference date").isEqualTo(reference);
}
/**
 * Builds the per-service metrics key for an invocation.
 * Format: {@code "<address>:<protocolServiceKey>"}.
 */
private String buildServiceKey(Invoker<?> invoker, Invocation invocation) {
    URL url = invoker.getUrl();
    return url.getAddress() + ":" + invocation.getProtocolServiceKey();
}
// Statistically exercises the adaptive load balancer: feeds synthetic rt/load
// metrics per protocol over 10k selections, then checks total selections and
// that each invoker's selection share tracks its weight scaled by an approximate
// EWMA of its response time, within 2x of the per-weight expectation.
@Test
@Order(1)
void testSelectByAdaptive() {
    int sumInvoker1 = 0;
    int sumInvoker2 = 0;
    int sumInvoker5 = 0;
    int loop = 10000;
    scopeModel = ApplicationModel.defaultModel();
    AdaptiveLoadBalance lb = new AdaptiveLoadBalance(scopeModel);
    // Warm-up select before the measured loop.
    lb.select(weightInvokersSR, null, weightTestInvocation);
    for (int i = 0; i < loop; i++) {
        Invoker selected = lb.select(weightInvokersSR, null, weightTestInvocation);
        Map<String, String> metricsMap = new HashMap<>();
        String idKey = buildServiceKey(selected);
        if (selected.getUrl().getProtocol().equals("test1")) {
            sumInvoker1++;
            metricsMap.put("rt", "10");
            metricsMap.put("load", "10");
            metricsMap.put("curTime", String.valueOf(System.currentTimeMillis() - 10));
            getAdaptiveMetricsInstance().addConsumerSuccess(idKey);
        }
        if (selected.getUrl().getProtocol().equals("test2")) {
            sumInvoker2++;
            metricsMap.put("rt", "100");
            metricsMap.put("load", "40");
            metricsMap.put("curTime", String.valueOf(System.currentTimeMillis() - 100));
            getAdaptiveMetricsInstance().addConsumerSuccess(idKey);
        }
        if (selected.getUrl().getProtocol().equals("test5")) {
            // test5 simulates a slow, overloaded, failing provider.
            metricsMap.put("rt", "5000");
            metricsMap.put("load", "400"); // 400%
            metricsMap.put("curTime", String.valueOf(System.currentTimeMillis() - 5000));
            getAdaptiveMetricsInstance().addErrorReq(idKey);
            sumInvoker5++;
        }
        getAdaptiveMetricsInstance().setProviderMetrics(idKey, metricsMap);
    }
    Map<Invoker<LoadBalanceBaseTest>, Integer> weightMap = weightInvokersSR.stream()
        .collect(Collectors.toMap(
            Function.identity(), e -> Integer.valueOf(e.getUrl().getParameter("weight"))));
    Integer totalWeight = weightMap.values().stream().reduce(0, Integer::sum);
    // max deviation = expectWeightValue * 2
    int expectWeightValue = loop / totalWeight;
    int maxDeviation = expectWeightValue * 2;
    double beta = 0.5;
    // this EMA is an approximate value
    double ewma1 = beta * 50 + (1 - beta) * 10;
    double ewma2 = beta * 50 + (1 - beta) * 100;
    double ewma5 = beta * 50 + (1 - beta) * 5000;
    AtomicInteger weight1 = new AtomicInteger();
    AtomicInteger weight2 = new AtomicInteger();
    AtomicInteger weight5 = new AtomicInteger();
    weightMap.forEach((k, v) -> {
        if (k.getUrl().getProtocol().equals("test1")) {
            weight1.set(v);
        } else if (k.getUrl().getProtocol().equals("test2")) {
            weight2.set(v);
        } else if (k.getUrl().getProtocol().equals("test5")) {
            weight5.set(v);
        }
    });
    Assertions.assertEquals(sumInvoker1 + sumInvoker2 + sumInvoker5, loop, "select failed!");
    Assertions.assertTrue(
        Math.abs(sumInvoker1 / (weightMap.get(weightInvoker1) * ewma1) - expectWeightValue) < maxDeviation,
        "select failed!");
    Assertions.assertTrue(
        Math.abs(sumInvoker2 / (weightMap.get(weightInvoker2) * ewma2) - expectWeightValue) < maxDeviation,
        "select failed!");
    Assertions.assertTrue(
        Math.abs(sumInvoker5 / (weightMap.get(weightInvoker5) * ewma5) - expectWeightValue) < maxDeviation,
        "select failed!");
}
/**
 * Authorizes a batch of actions for the given request context.
 * The authorizer data is read once up front so every action in the batch is
 * evaluated against the same consistent snapshot, and results are returned in
 * the same order as the input actions.
 */
@Override
public List<AuthorizationResult> authorize(
        AuthorizableRequestContext requestContext,
        List<Action> actions) {
    StandardAuthorizerData snapshot = data;
    List<AuthorizationResult> results = new ArrayList<>(actions.size());
    for (Action action : actions) {
        results.add(snapshot.authorize(requestContext, action));
    }
    return results;
}
// Batch authorization against a large ACL set: results must come back in input
// order with the expected ALLOWED/DENIED mix for user "bob".
@Test
public void testAuthorizationWithManyAcls() throws Exception {
    StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer();
    addManyAcls(authorizer);
    assertEquals(Arrays.asList(ALLOWED, DENIED),
        authorizer.authorize(new MockAuthorizableRequestContext.Builder().
            setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(),
            Arrays.asList(newAction(READ, TOPIC, "green1"),
                newAction(WRITE, GROUP, "wheel"))));
    assertEquals(Arrays.asList(DENIED, ALLOWED, DENIED),
        authorizer.authorize(new MockAuthorizableRequestContext.Builder().
            setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(),
            Arrays.asList(newAction(READ, TOPIC, "alpha"),
                newAction(WRITE, GROUP, "arbitrary"),
                newAction(READ, TOPIC, "ala"))));
}
/**
 * Renders a preview of the post named in the path, at an optional snapshot
 * (query param, defaulting to the head snapshot). Requires an authenticated
 * user; non-contributors get a NotFoundException.
 * NOTE(review): 404 (not 403) is raised for non-contributors — presumably to
 * avoid leaking the post's existence; confirm with the API contract.
 */
private Mono<ServerResponse> previewPost(ServerRequest request) {
    final var name = request.pathVariable("name");
    return currentAuthenticatedUserName()
        // Requires an authenticated principal; an empty Mono falls through to switchIfEmpty below.
        .flatMap(principal -> client.fetch(Post.class, name))
        .flatMap(post -> {
            String snapshotName = request.queryParam(SNAPSHOT_NAME_PARAM)
                .orElse(post.getSpec().getHeadSnapshot());
            return convertToPostVo(post, snapshotName);
        })
        .flatMap(post -> canPreview(post.getContributors())
            .doOnNext(canPreview -> {
                if (!canPreview) {
                    throw new NotFoundException("Post not found.");
                }
            })
            .thenReturn(post)
        )
        // Check permissions before throwing this exception
        .switchIfEmpty(Mono.error(() -> new NotFoundException("Post not found.")))
        .flatMap(postVo -> {
            String template = postVo.getSpec().getTemplate();
            Map<String, Object> model = ModelMapUtils.postModel(postVo);
            return viewNameResolver.resolveViewNameOrDefault(request, template,
                    DefaultTemplateEnum.POST.getValue())
                .flatMap(templateName -> ServerResponse.ok().render(templateName, model));
        });
}
// Authenticated owner previews a post: expects 200 and verifies the snapshot
// conversion, view resolution, and post fetch were all invoked.
@Test
@WithMockUser(username = "testuser")
public void previewPost() {
    Post post = new Post();
    post.setMetadata(new Metadata());
    post.getMetadata().setName("post1");
    post.setSpec(new Post.PostSpec());
    post.getSpec().setOwner("testuser");
    post.getSpec().setHeadSnapshot("snapshot1");
    post.getSpec().setBaseSnapshot("snapshot2");
    post.getSpec().setTemplate("postTemplate");
    when(client.fetch(eq(Post.class), eq("post1"))).thenReturn(Mono.just(post));
    PostVo postVo = PostVo.from(post);
    postVo.setContributors(contributorVos());
    when(postPublicQueryService.convertToVo(eq(post), eq(post.getSpec().getHeadSnapshot())))
        .thenReturn(Mono.just(postVo));
    when(viewNameResolver.resolveViewNameOrDefault(any(ServerRequest.class), eq("postTemplate"),
        eq("post"))).thenReturn(Mono.just("postView"));
    webTestClient.get().uri("/preview/posts/post1")
        .exchange()
        .expectStatus().isOk();
    verify(viewResolver).resolveViewName(any(), any());
    verify(postPublicQueryService).convertToVo(eq(post), eq(post.getSpec().getHeadSnapshot()));
    verify(client).fetch(eq(Post.class), eq("post1"));
}
/**
 * Parses a User-Agent header string into a {@code UserAgent}.
 * Thin facade over {@code UserAgentParser.parse}.
 */
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
// Regression for issue I7OTCU: Chrome UA strings on macOS (desktop) and iPhone
// (CriOS, mobile) must parse into the expected browser/engine/OS/platform fields.
@Test
public void issuseI7OTCUTest() {
    // MAC Chrome 浏览器 ua
    final String uaStr = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36";
    final UserAgent ua = UserAgentUtil.parse(uaStr);
    assertEquals("Chrome", ua.getBrowser().toString());
    assertEquals("114.0.0.0", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("537.36", ua.getEngineVersion());
    assertEquals("OSX", ua.getOs().toString());
    assertEquals("10_15_7", ua.getOsVersion());
    assertEquals("Mac", ua.getPlatform().toString());
    assertFalse(ua.isMobile());

    // iphone Chrome 浏览器ua
    final String uaStr2 = "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1";
    final UserAgent ua2 = UserAgentUtil.parse(uaStr2);
    assertEquals("Chrome", ua2.getBrowser().toString());
    assertEquals("56.0.2924.75", ua2.getVersion());
    assertEquals("Webkit", ua2.getEngine().toString());
    assertEquals("602.1.50", ua2.getEngineVersion());
    assertEquals("iPhone", ua2.getOs().toString());
    assertEquals("10_3", ua2.getOsVersion());
    assertEquals("iPhone", ua2.getPlatform().toString());
    assertTrue(ua2.isMobile());
}
/**
 * Trains a maximum-entropy model with default hyper-parameters
 * (delegates to the overload with an empty {@code Properties}).
 *
 * @param p number of features — NOTE(review): inferred from the parameter name; confirm
 * @param x training samples as sparse feature-index arrays
 * @param y class labels
 */
public static Maxent fit(int p, int[][] x, int[] y) {
    return fit(p, x, y, new Properties());
}
// Trains on the Protein dataset, pins the exact test-set error count, and
// round-trips the model through serialization.
@Test
public void testProtein() throws Exception {
    System.out.println("protein");
    Maxent model = Maxent.fit(Protein.p, Protein.x, Protein.y);
    int[] prediction = model.predict(Protein.testx);
    int error = Error.of(prediction, Protein.testy);
    System.out.format("The error is %d of %d%n", error, Protein.testx.length);
    assertEquals(1339, error);
    java.nio.file.Path temp = Write.object(model);
    Read.object(temp);
}
/**
 * Applies the YARN container environment to the Flink configuration: sets the
 * temporary directories from LOCAL_DIRS, then applies the remaining
 * environment-derived settings.
 *
 * @param configuration configuration to mutate
 * @param currDir       container working directory
 * @param variables     container environment variables
 */
@VisibleForTesting
static void setupAndModifyConfiguration(
        Configuration configuration, String currDir, Map<String, String> variables) throws Exception {
    final String localDirs = variables.get(Environment.LOCAL_DIRS.key());
    LOG.info("Current working/local Directory: {}", localDirs);
    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);
    setupConfigurationFromVariables(configuration, currDir, variables);
}
// The ENV_FLINK_NODE_ID environment variable must surface as the internal
// TASK_MANAGER_NODE_ID configuration option.
@Test
void testTaskManagerNodeIdConfiguration() throws Exception {
    final String resourceDirPath = Paths.get("src", "test", "resources").toAbsolutePath().toString();
    Configuration configuration = new Configuration();
    YarnTaskExecutorRunner.setupAndModifyConfiguration(
        configuration,
        resourceDirPath,
        Collections.singletonMap(YarnResourceManagerDriver.ENV_FLINK_NODE_ID, "test"));
    assertThat(configuration.get(TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID))
        .isEqualTo("test");
}
/**
 * Computes the offsets that can safely be committed right now, plus metadata
 * about the remaining backlog (total committable/uncommittable counts, and the
 * partition with the largest uncommitted deque). Empty deques are pruned from
 * the tracking map as a side effect.
 */
public CommittableOffsets committableOffsets() {
    Map<Map<String, Object>, Map<String, Object>> offsets = new HashMap<>();
    int totalCommittableMessages = 0;
    int totalUncommittableMessages = 0;
    int largestDequeSize = 0;
    Map<String, Object> largestDequePartition = null;
    for (Map.Entry<Map<String, Object>, Deque<SubmittedRecord>> entry : records.entrySet()) {
        Map<String, Object> partition = entry.getKey();
        Deque<SubmittedRecord> queuedRecords = entry.getValue();
        int initialDequeSize = queuedRecords.size();
        if (canCommitHead(queuedRecords)) {
            // NOTE(review): committableOffset appears to drain committed records
            // from the deque — the size difference below counts them; confirm.
            Map<String, Object> offset = committableOffset(queuedRecords);
            offsets.put(partition, offset);
        }
        int uncommittableMessages = queuedRecords.size();
        int committableMessages = initialDequeSize - uncommittableMessages;
        totalCommittableMessages += committableMessages;
        totalUncommittableMessages += uncommittableMessages;
        if (uncommittableMessages > largestDequeSize) {
            largestDequeSize = uncommittableMessages;
            largestDequePartition = partition;
        }
    }
    // Clear out all empty deques from the map to keep it from growing indefinitely
    records.values().removeIf(Deque::isEmpty);
    return new CommittableOffsets(offsets, totalCommittableMessages, totalUncommittableMessages,
        records.size(), largestDequeSize, largestDequePartition);
}
// With no submitted records, committableOffsets() is repeatedly empty and
// leaves no deques behind.
@Test
public void testNoRecords() {
    CommittableOffsets committableOffsets = submittedRecords.committableOffsets();
    assertTrue(committableOffsets.isEmpty());
    committableOffsets = submittedRecords.committableOffsets();
    assertTrue(committableOffsets.isEmpty());
    committableOffsets = submittedRecords.committableOffsets();
    assertTrue(committableOffsets.isEmpty());
    assertNoRemainingDeques();
}
/**
 * Converts serialized bytes from the given topic into Connect data with an
 * optional-string schema.
 *
 * @throws DataException wrapping any deserialization failure
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    try {
        final String text = deserializer.deserialize(topic, value);
        return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, text);
    } catch (SerializationException e) {
        throw new DataException("Failed to deserialize string: ", e);
    }
}
// Round-trip: raw bytes come back as the original string with the
// optional-string schema.
@Test
public void testBytesToString() {
    SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes());
    assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema());
    assertEquals(SAMPLE_STRING, data.value());
}
/**
 * Parses a telemetry JSON payload: accepts either a single JSON object or an
 * array of JSON objects, feeding each into {@code parseObject}. Anything else
 * (primitives, nulls, non-object array elements) raises JsonSyntaxException.
 */
private static void convertToTelemetry(JsonElement jsonElement, long systemTs,
        Map<Long, List<KvEntry>> result, PostTelemetryMsg.Builder builder) {
    if (jsonElement.isJsonObject()) {
        parseObject(systemTs, result, builder, jsonElement.getAsJsonObject());
        return;
    }
    if (jsonElement.isJsonArray()) {
        for (JsonElement element : jsonElement.getAsJsonArray()) {
            if (!element.isJsonObject()) {
                throw new JsonSyntaxException(CAN_T_PARSE_VALUE + element);
            }
            parseObject(systemTs, result, builder, element.getAsJsonObject());
        }
        return;
    }
    throw new JsonSyntaxException(CAN_T_PARSE_VALUE + jsonElement);
}
// Scientific-notation numeric (101E-1) must be parsed as the double 10.1.
@Test
public void testParseBigDecimalAsDouble() {
    var result = JsonConverter.convertToTelemetry(JsonParser.parseString("{\"meterReadingDelta\": 101E-1}"), 0L);
    Assertions.assertEquals(10.1, result.get(0L).get(0).getDoubleValue().get(), 0.0);
}
/**
 * A name is valid when it is non-null, at most MAX_LENGTH characters, and
 * matches the NAME_TYPE_PATTERN_REGEX pattern.
 */
public boolean isNameValid(String name) {
    if (name == null || name.length() > MAX_LENGTH) {
        return false;
    }
    return XmlUtils.matchUsingRegex(NAME_TYPE_PATTERN_REGEX, name);
}
// Length boundary: 255 characters is valid, 256 is not.
@Test
public void shouldValidateNameBasedOnLength() {
    assertThat(new NameTypeValidator().isNameValid("name"), is(true));
    assertThat(new NameTypeValidator().isNameValid(nameOfLength(255)), is(true));
    assertThat(new NameTypeValidator().isNameValid(nameOfLength(256)), is(false));
}
/**
 * Deletes a segment's directory from the tier it was last persisted on locally
 * (falling back to the default data dir when no tier path is recorded), then
 * removes the local tier-tracking state for the segment.
 */
@Override
public void delete(SegmentDirectoryLoaderContext segmentLoaderContext) throws Exception {
    String segmentName = segmentLoaderContext.getSegmentName();
    // lastTierPath[0] = tier name, lastTierPath[1] = persisted path (may be null) —
    // NOTE(review): inferred from the usage below; confirm against getSegmentTierPersistedLocally.
    String[] lastTierPath = getSegmentTierPersistedLocally(segmentName, segmentLoaderContext);
    File lastDataDir = lastTierPath[1] != null ? new File(lastTierPath[1]) : getDefaultDataDir(segmentLoaderContext);
    if (lastDataDir.exists()) {
        FileUtils.deleteQuietly(lastDataDir);
        LOGGER.info("Deleted segment directory {} on last known tier: {}", lastDataDir,
            TierConfigUtils.normalizeTierName(lastTierPath[0]));
    }
    deleteSegmentTierPersistedLocally(segmentName, segmentLoaderContext);
}
// A corrupted tier-track file must make delete() fail fast with an
// IllegalStateException mentioning the unexpected version.
@Test(expectedExceptions = IllegalStateException.class, expectedExceptionsMessageRegExp = ".*unexpected version.*")
public void testDeleteSegmentBadTrackFile() throws Exception {
    TierBasedSegmentDirectoryLoader loader = new TierBasedSegmentDirectoryLoader();
    SegmentDirectoryLoaderContext loaderCtx = new SegmentDirectoryLoaderContext.Builder().setSegmentName("seg01")
        .setTableDataDir(TEMP_DIR.getAbsolutePath() + "/" + TABLE_NAME_WITH_TYPE).build();
    // Corrupt the tier track file.
    File tierTrackFile = new File(TEMP_DIR.getAbsolutePath() + "/" + TABLE_NAME_WITH_TYPE, "seg01.tier");
    FileUtils.write(tierTrackFile, "a lot of bad data", StandardCharsets.UTF_8);
    loader.delete(loaderCtx);
}
/**
 * Builds wrapper method specs for an ABI function definition.
 * Convenience overload delegating with the flag set to {@code true}
 * (NOTE(review): flag semantics not visible here — confirm against the two-arg overload).
 */
List<MethodSpec> buildFunctions(AbiDefinition functionDefinition) throws ClassNotFoundException {
    return buildFunctions(functionDefinition, true);
}
// A constant function generated in "both" mode must yield exactly two wrappers:
// a call_* single-value-return method and a send_* transaction method, whose
// generated source is pinned verbatim below.
@Test
public void testBuildFunctionConstantSingleValueReturnAndTransaction() throws Exception {
    AbiDefinition functionDefinition = new AbiDefinition(
        true,
        Arrays.asList(new NamedType("param", "uint8")),
        "functionName",
        Arrays.asList(new NamedType("result", "int8")),
        "type",
        false);
    List<MethodSpec> methodSpecs = solidityFunctionWrapperBoth.buildFunctions(functionDefinition);
    String expectedCall =
        "public org.web3j.protocol.core.RemoteFunctionCall<java.math.BigInteger> call_functionName(\n"
            + "    java.math.BigInteger param) {\n"
            + "  final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
            + "      java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param)), \n"
            + "      java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.generated.Int8>() {}));\n"
            + "  return executeRemoteCallSingleValueReturn(function, java.math.BigInteger.class);\n"
            + "}\n";
    String expectedSend =
        "public org.web3j.protocol.core.RemoteFunctionCall<org.web3j.protocol.core.methods.response.TransactionReceipt> send_functionName(\n"
            + "    java.math.BigInteger param) {\n"
            + "  final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
            + "      FUNC_FUNCTIONNAME, \n"
            + "      java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param)), \n"
            + "      java.util.Collections.<org.web3j.abi.TypeReference<?>>emptyList());\n"
            + "  return executeRemoteCallTransaction(function);\n"
            + "}\n";
    assertEquals(2, methodSpecs.size());
    assertEquals(expectedCall, methodSpecs.get(0).toString());
    assertEquals(expectedSend, methodSpecs.get(1).toString());
}
/**
 * Converts a level name to a {@code Level}, defaulting to DEBUG when the name
 * is not recognized (delegates to the two-arg overload).
 */
public static Level toLevel(String sArg) {
    return toLevel(sArg, Level.DEBUG);
}
// Trailing whitespace in the level name must not prevent recognition.
@Test
public void withSpaceSuffix() {
    assertEquals(Level.INFO, Level.toLevel("INFO "));
}
/**
 * Returns the TCP_NO_DELAY client property, defaulting to {@code false} when unset.
 */
@Override
public boolean getTcpNoDelay() {
    return clientConfig.getPropertyAsBoolean(TCP_NO_DELAY, false);
}
// Setting the property to true must override the false default.
@Test
void testGetTcpNoDelayOverride() {
    clientConfig.set(ConnectionPoolConfigImpl.TCP_NO_DELAY, true);
    assertTrue(connectionPoolConfig.getTcpNoDelay());
}
/**
 * Creates a class type from a fully-qualified name and its type arguments.
 * The argument list is defensively copied into an immutable list.
 */
public static UClassType create(CharSequence fullyQualifiedClass, List<UType> typeArguments) {
    return new AutoValue_UClassType(
        StringName.of(fullyQualifiedClass), ImmutableList.copyOf(typeArguments));
}
// Class types are equal only when both the raw name and the full type-argument
// list match; raw vs. parameterized and differently-parameterized types differ.
@Test
public void equality() {
    UType stringType = UClassType.create("java.lang.String");
    new EqualsTester()
        .addEqualityGroup(stringType)
        .addEqualityGroup(UClassType.create("java.util.List", stringType))
        .addEqualityGroup(UClassType.create("java.util.Map", stringType, stringType))
        .addEqualityGroup(UClassType.create("java.lang.Integer"))
        .addEqualityGroup(
            UClassType.create("java.util.List", UClassType.create("java.lang.Integer")))
        .testEquals();
}
/**
 * Blocking poll of the head element, waiting up to the given timeout.
 * Delegates to the async variant and waits interruptibly for its result.
 */
@Override
public V pollFirst(long timeout, TimeUnit unit) throws InterruptedException {
    return commandExecutor.getInterrupted(pollFirstAsync(timeout, unit));
}
// Elements come back in insertion order; an empty queue blocks for roughly the
// full timeout before returning null.
@Test
public void testPollFirst() throws InterruptedException {
    RBlockingDeque<Integer> queue1 = redisson.getPriorityBlockingDeque("queue1");
    queue1.put(1);
    queue1.put(2);
    queue1.put(3);
    assertThat(queue1.pollFirst(2, TimeUnit.SECONDS)).isEqualTo(1);
    assertThat(queue1.pollFirst(2, TimeUnit.SECONDS)).isEqualTo(2);
    assertThat(queue1.pollFirst(2, TimeUnit.SECONDS)).isEqualTo(3);
    long s = System.currentTimeMillis();
    assertThat(queue1.pollFirst(5, TimeUnit.SECONDS)).isNull();
    assertThat(System.currentTimeMillis() - s).isGreaterThan(4900);
}
/**
 * Begins a new transaction for the session's START TRANSACTION statement.
 * Rejects clients without transaction support (INCOMPATIBLE_CLIENT) and
 * nested transactions (NOT_SUPPORTED).
 */
@Override
public ListenableFuture<?> execute(StartTransaction statement, TransactionManager transactionManager,
        Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters) {
    Session session = stateMachine.getSession();
    if (!session.isClientTransactionSupport()) {
        throw new PrestoException(StandardErrorCode.INCOMPATIBLE_CLIENT, "Client does not support transactions");
    }
    if (session.getTransactionId().isPresent()) {
        throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, "Nested transactions not supported");
    }
    // Isolation level and read-only mode default when the statement omits them.
    Optional<IsolationLevel> isolationLevel = extractIsolationLevel(statement);
    Optional<Boolean> readOnly = extractReadOnly(statement);
    TransactionId transactionId = transactionManager.beginTransaction(
        isolationLevel.orElse(TransactionManager.DEFAULT_ISOLATION),
        readOnly.orElse(TransactionManager.DEFAULT_READ_ONLY),
        false);
    stateMachine.setStartedTransactionId(transactionId);
    // Since the current session does not contain this new transaction ID, we need to manually mark it as inactive
    // when this statement completes.
    transactionManager.trySetInactive(transactionId);
    return immediateFuture(null);
}
// Starting a transaction inside an existing one must fail with NOT_SUPPORTED
// and leave no transaction state behind.
@Test
public void testNestedTransaction() {
    TransactionManager transactionManager = createTestTransactionManager();
    Session session = sessionBuilder()
        .setTransactionId(TransactionId.create())
        .setClientTransactionSupport()
        .build();
    QueryStateMachine stateMachine = createQueryStateMachine("START TRANSACTION", session, true, transactionManager, executor, metadata);
    StartTransactionTask startTransactionTask = new StartTransactionTask();
    try {
        getFutureValue(startTransactionTask.execute(new StartTransaction(ImmutableList.of()), transactionManager, metadata, new AllowAllAccessControl(), stateMachine, emptyList()));
        fail();
    } catch (PrestoException e) {
        assertEquals(e.getErrorCode(), NOT_SUPPORTED.toErrorCode());
    }
    assertTrue(transactionManager.getAllTransactionInfos().isEmpty());
    assertFalse(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId());
    assertFalse(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().isPresent());
}
/**
 * Supplies the runner by deferring to the wrapped supplier.
 */
@Override
public Runner get() {
    return runners.get();
}
// The runner's event bus must be a distinct wrapper, not the shared bus itself.
@Test
void runner_should_wrap_event_bus_bus() {
    // This avoids problems with JUnit which listens to individual runners
    EventBus runnerBus = runnerSupplier.get().getBus();
    assertAll(
        () -> assertThat(eventBus, is(not(equalTo(runnerBus)))),
        () -> assertThat(runnerBus, is(not(equalTo(eventBus)))));
}
/**
 * Looks up service records by name. The out-of-service filter is only passed
 * through when the client actually supplied the query parameter.
 */
@GET
@Path("/service/{name}")
public Uni<List<Record>> getByName(@PathParam("name") String name,
        @QueryParam("includeOutOfService") Boolean includeOutOfService) {
    return includeOutOfService == null
        ? provider.getServicesByName(name)
        : provider.getServicesByName(name, includeOutOfService);
}
// Registers a service, fetches it by name, and checks the same registration id
// comes back.
@Test
public void testGetByName() {
    ServiceRegistration service = createTestService();
    // register the service
    Response registerResponse = given()
        .body(service)
        .contentType(MediaType.APPLICATION_JSON)
        .post("/service-discovery/registry");
    // send request to get services by name
    Response getResponse = when()
        .get("service-discovery/service/{name}", service.getName());
    // verify expected response and extract the returned list
    List<Map<String, Object>> results = getResponse.then()
        .statusCode(200)
        .extract().as(List.class);
    // verify expected results from the get request
    assertEquals(1, results.size(), "Unexpected number of services returned");
    Object expectedRegistrationId = registerResponse.jsonPath().get("registration");
    Object actualRegistrationId = results.get(0).get("registration");
    assertEquals(expectedRegistrationId, actualRegistrationId, "Unexpected service returned");
}
/**
 * Creates an identity group from the request payload.
 * Rejects a missing id (400 via FlowableIllegalArgumentException) and a
 * duplicate id (409 via FlowableConflictException); responds 201 on success.
 */
@ApiOperation(value = "Create a group", tags = { "Groups" }, code = 201)
@ApiResponses(value = {
    @ApiResponse(code = 201, message = "Indicates the group was created."),
    @ApiResponse(code = 400, message = "Indicates the id of the group was missing.")
})
@PostMapping(value = "/identity/groups", produces = "application/json")
@ResponseStatus(HttpStatus.CREATED)
public GroupResponse createGroup(@RequestBody GroupRequest groupRequest) {
    if (groupRequest.getId() == null) {
        throw new FlowableIllegalArgumentException("Id cannot be null.");
    }

    if (restApiInterceptor != null) {
        restApiInterceptor.createGroup(groupRequest);
    }

    // Check if a user with the given ID already exists so we return a CONFLICT
    if (identityService.createGroupQuery().groupId(groupRequest.getId()).count() > 0) {
        throw new FlowableConflictException("A group with id '" + groupRequest.getId() + "' already exists.");
    }

    Group created = identityService.newGroup(groupRequest.getId());
    created.setId(groupRequest.getId());
    created.setName(groupRequest.getName());
    created.setType(groupRequest.getType());
    identityService.saveGroup(created);

    return restResponseFactory.createGroupResponse(created);
}
@Test
public void testCreateGroup() throws Exception {
    // POSTs a group payload and verifies both the REST response body and that
    // the group is actually persisted; cleans up the group afterwards.
    try {
        ObjectNode requestNode = objectMapper.createObjectNode();
        requestNode.put("id", "testgroup");
        requestNode.put("name", "Test group");
        requestNode.put("type", "Test type");

        HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_GROUP_COLLECTION));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        CloseableHttpResponse response = executeRequest(httpPost, HttpStatus.SC_CREATED);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThat(responseNode).isNotNull();
        assertThatJson(responseNode)
                .when(Option.IGNORING_EXTRA_FIELDS)
                .isEqualTo("{"
                        + " id: 'testgroup',"
                        + " name: 'Test group',"
                        + " type: 'Test type',"
                        + " url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_GROUP, "testgroup") + "'"
                        + "}");

        // The group must also exist in the identity store, not just in the response.
        assertThat(identityService.createGroupQuery().groupId("testgroup").singleResult()).isNotNull();
    } finally {
        try {
            identityService.deleteGroup("testgroup");
        } catch (Throwable t) {
            // Ignore, group might not have been created by test
        }
    }
}
/**
 * Parses a resource configuration string, delegating to the two-argument
 * overload with {@code Long.MAX_VALUE} as the second argument.
 *
 * @param value the configuration string to parse
 * @return the parsed {@link ConfigurableResource}
 * @throws AllocationConfigurationException if the value cannot be parsed
 */
public static ConfigurableResource parseResourceConfigValue(String value)
    throws AllocationConfigurationException {
    return parseResourceConfigValue(value, Long.MAX_VALUE);
}
@Test
public void testCpuPercentageMemoryAbsolute() throws Exception {
    // Mixing a percentage cpu value with an absolute memory value should be
    // rejected; expectMissingResource presumably registers the expected
    // "missing memory" failure — confirm against the helper's definition.
    expectMissingResource("memory");
    parseResourceConfigValue("50% cpu, 1024 mb");
}
/**
 * Builds the list of file paths for the given names/masks.
 *
 * @deprecated delegates to the overload taking an explicit bowl, passing
 *             {@code DefaultBowl.getInstance()}; call that overload directly.
 */
@Deprecated
public static String[] createFilePathList( VariableSpace space, String[] fileName, String[] fileMask,
    String[] excludeFileMask, String[] fileRequired ) {
    return createFilePathList( DefaultBowl.getInstance(), space, fileName, fileMask, excludeFileMask,
        fileRequired );
}
@Test
public void testSpecialCharsInFileNamesEscaped() throws IOException, KettleException {
    // With KETTLE_RETURN_ESCAPED_URI_STRINGS=Y, file names containing special
    // characters (here: spaces) must come back URI-encoded, i.e. the raw name
    // must NOT appear verbatim at the end of the returned path.
    System.setProperty( Const.KETTLE_RETURN_ESCAPED_URI_STRINGS, "Y" );
    String fileNameWithSpaces = "file name with spaces";
    tempFolder.newFile( fileNameWithSpaces );
    // Variable substitution is stubbed to be the identity function.
    VariableSpace spaceMock = mock( VariableSpace.class );
    when( spaceMock.environmentSubstitute( any( String[].class ) ) )
        .thenAnswer( (Answer<String[]>) invocationOnMock -> (String[]) invocationOnMock.getArguments()[ 0 ] );
    String[] folderNameList = { tempFolder.getRoot().getPath() };
    String[] emptyStringArray = { "" };
    boolean[] fileRequiredList = { true };
    String[] paths = FileInputList
        .createFilePathList( spaceMock, folderNameList, emptyStringArray, emptyStringArray, emptyStringArray,
            fileRequiredList );
    assertFalse( "File with spaces not encoded", paths[ 0 ].endsWith( fileNameWithSpaces ) );
    // Restore the default behavior for other tests.
    System.setProperty( Const.KETTLE_RETURN_ESCAPED_URI_STRINGS, "N" );
}
/**
 * Returns, among {@code projectUuids}, the UUIDs of projects that still need
 * issue synchronization. Thin delegation to
 * {@code BranchDao#selectProjectUuidsWithIssuesNeedSync}.
 */
public List<String> findProjectUuidsWithIssuesSyncNeed(DbSession dbSession, Collection<String> projectUuids) {
    return dbClient.branchDao().selectProjectUuidsWithIssuesNeedSync(dbSession, projectUuids);
}
@Test
public void findProjectUuidsWithIssuesSyncNeed() {
    // Two projects without sync need, two with; only the latter two must be returned.
    ProjectData projectData1 = insertProjectWithBranches(false, 0);
    ProjectData projectData2 = insertProjectWithBranches(false, 0);
    ProjectData projectData3 = insertProjectWithBranches(true, 0);
    ProjectData projectData4 = insertProjectWithBranches(true, 0);

    assertThat(underTest.findProjectUuidsWithIssuesSyncNeed(db.getSession(),
        Arrays.asList(projectData1.getProjectDto().getUuid(), projectData2.getProjectDto().getUuid(),
            projectData3.getProjectDto().getUuid(), projectData4.getProjectDto().getUuid())))
        .containsOnly(projectData3.getProjectDto().getUuid(), projectData4.getProjectDto().getUuid());
}
/**
 * Writes one long at the current position using the configured endianness,
 * then advances the position by {@code LONG_SIZE_IN_BYTES} (8).
 */
@Override
public void writeLong(final long v) throws IOException {
    // Make sure the backing buffer can hold 8 more bytes before writing.
    ensureAvailable(LONG_SIZE_IN_BYTES);
    Bits.writeLong(buffer, pos, v, isBigEndian);
    pos += LONG_SIZE_IN_BYTES;
}
@Test
public void testWriteLongV() throws Exception {
    // Round-trip: the value written at position 0 must read back identically
    // with the big-endian reader (the stream under test writes big-endian here).
    long expected = 100;
    out.writeLong(expected);
    long actual = Bits.readLongB(out.buffer, 0);
    assertEquals(expected, actual);
}
/**
 * Converts an array of Spark predicates into a single Iceberg expression by
 * AND-ing the individual conversions together. Fails fast if any predicate
 * cannot be converted.
 */
public static Expression convert(Predicate[] predicates) {
    Expression result = Expressions.alwaysTrue();
    for (int i = 0; i < predicates.length; i++) {
        Expression icebergExpr = convert(predicates[i]);
        Preconditions.checkArgument(
            icebergExpr != null,
            "Cannot convert Spark predicate to Iceberg expression: %s",
            predicates[i]);
        result = Expressions.and(result, icebergExpr);
    }
    return result;
}
@Test
public void testNotInNull() {
    // NOT IN with a null literal must translate to notNull(col) AND notIn(...),
    // with the null literal dropped from the value list.
    String col = "strCol";
    NamedReference namedReference = FieldReference.apply(col);
    LiteralValue nullValue = new LiteralValue(null, DataTypes.StringType);
    LiteralValue value1 = new LiteralValue("value1", DataTypes.StringType);
    LiteralValue value2 = new LiteralValue("value2", DataTypes.StringType);

    // Values only contains null
    Predicate notInNull = new Not(new Predicate("IN", expressions(namedReference, nullValue)));
    Expression expectedNotInNull = Expressions.and(Expressions.notNull(col), Expressions.notIn(col));
    Expression actualNotInNull = SparkV2Filters.convert(notInNull);
    assertEquals(expectedNotInNull, actualNotInNull);

    // Values mixing null and real values: the null must be filtered out.
    Predicate notIn = new Not(new Predicate("IN", expressions(namedReference, nullValue, value1, value2)));
    Expression expectedNotIn =
        Expressions.and(Expressions.notNull(col), Expressions.notIn(col, "value1", "value2"));
    Expression actualNotIn = SparkV2Filters.convert(notIn);
    assertEquals(expectedNotIn, actualNotIn);
}
/**
 * Returns the schema for the given protobuf message class by resolving its
 * descriptor and delegating to the descriptor-based overload.
 */
static Schema getSchema(Class<? extends Message> clazz) {
    return getSchema(ProtobufUtil.getDescriptorForClass(clazz));
}
@Test
public void testRepeatedSchema() {
    // The schema derived from a message with repeated primitive fields must
    // match the pre-built golden schema.
    assertEquals(
        TestProtoSchemas.REPEATED_SCHEMA,
        ProtoSchemaTranslator.getSchema(Proto3SchemaMessages.RepeatPrimitive.class));
}
/**
 * Returns the platform-specific application data directory for {@code appName},
 * creating it (and any missing parent directories) if it does not exist yet.
 *
 * @param appName the application name used to derive the directory path
 * @return the existing (possibly just created) application data directory
 * @throws RuntimeException if the directory cannot be created
 */
public static Path get(String appName) {
    final Path applicationDataDirectory = getPath(appName);
    try {
        // Idempotent: succeeds whether or not the directory already exists.
        Files.createDirectories(applicationDataDirectory);
    } catch (IOException ioe) {
        // Include the offending path so failures are diagnosable from the message.
        throw new RuntimeException(
                "Couldn't find/create AppDataDirectory: " + applicationDataDirectory, ioe);
    }
    return applicationDataDirectory;
}
@Test
public void worksOnCurrentPlatform() {
    // Resolves the app-data directory on whatever OS runs the test and
    // compares against the per-platform expected-path helper.
    final String appName = "bitcoinj";
    String path = AppDataDirectory.get(appName).toString();
    if (PlatformUtils.isWindows()) {
        assertEquals("Path wrong on Windows", winPath(appName), path);
    } else if (PlatformUtils.isMac()) {
        assertEquals("Path wrong on Mac", macPath(appName), path);
    } else if (PlatformUtils.isLinux()) {
        assertEquals("Path wrong on Linux", unixPath(appName), path);
    } else {
        // Unrecognized platforms fall back to the unix-style layout.
        assertEquals("Path wrong on unknown/default", unixPath(appName), path);
    }
}
/**
 * Accumulates size statistics from both constituent handles: the metadata
 * handle and the state directory handle.
 */
@Override
public void collectSizeStats(StateObjectSizeStatsCollector collector) {
    metaStateHandle.collectSizeStats(collector);
    directoryStateHandle.collectSizeStats(collector);
}
@Test
public void testDirectorySize(@TempDir Path directory) throws IOException {
    // Verifies that collectSizeStats sums the metadata handle's bytes plus all
    // files under the state directory, including files in nested subdirectories.
    final int metaDataBytes = 42;
    ByteStreamStateHandle metaDataStateHandle =
            new ByteStreamStateHandle("MetaDataTest", new byte[metaDataBytes]);

    // One file directly inside the directory ...
    final int fileOneBytes = 1024;
    File outputFile = new File(directory.toFile(), "out.001");
    try (FileOutputStream outputStream = new FileOutputStream(outputFile)) {
        outputStream.write(new byte[fileOneBytes]);
    }

    // ... and one inside a subdirectory, to prove recursion.
    File subPath = new File(directory.toFile(), "subdir");
    Preconditions.checkState(subPath.mkdirs());
    final int fileTwoBytes = 128;
    outputFile = new File(subPath, "out.002");
    try (FileOutputStream outputStream = new FileOutputStream(outputFile)) {
        outputStream.write(new byte[fileTwoBytes]);
    }

    DirectoryStateHandle directoryStateHandle = DirectoryStateHandle.forPathWithSize(directory);
    IncrementalLocalKeyedStateHandle handle =
            new IncrementalLocalKeyedStateHandle(
                    UUID.randomUUID(),
                    0,
                    directoryStateHandle,
                    new KeyGroupRange(0, 1),
                    metaDataStateHandle,
                    Collections.emptyList());

    StateObject.StateObjectSizeStatsCollector stats =
            StateObject.StateObjectSizeStatsCollector.create();
    handle.collectSizeStats(stats);
    Assertions.assertEquals(
            metaDataBytes + fileOneBytes + fileTwoBytes, extractLocalStateSizes(stats));
}
/**
 * Returns the job staging directory for the current login user.
 * Convenience overload delegating to the three-argument variant.
 */
public static Path getStagingDir(Cluster cluster, Configuration conf)
    throws IOException, InterruptedException {
    UserGroupInformation user = UserGroupInformation.getLoginUser();
    return getStagingDir(cluster, conf, user);
}
@Test
public void testGetStagingDirWhenFullFileOwnerNameAndFullUserName()
    throws IOException, InterruptedException {
    // Verifies the staging dir is accepted when the directory owner's
    // principal name matches the user, regardless of letter case.
    Cluster cluster = mock(Cluster.class);
    Configuration conf = new Configuration();
    Path stagingPath = mock(Path.class);
    UserGroupInformation user = UserGroupInformation
        .createUserForTesting(USER_1, GROUP_NAMES);
    assertEquals(USER_1, user.getUserName());
    FileSystem fs = new FileSystemTestHelper.MockFileSystem();
    when(cluster.getStagingAreaDir()).thenReturn(stagingPath);
    when(stagingPath.getFileSystem(conf)).thenReturn(fs);

    // Staging directory owner full principal name is in lower case.
    String stagingDirOwner = USER_1.toLowerCase();
    FileStatus fileStatus = new FileStatus(1, true, 1, 1, 100L, 100L,
        FsPermission.getDefault(), stagingDirOwner, stagingDirOwner, stagingPath);
    when(fs.getFileStatus(stagingPath)).thenReturn(fileStatus);
    assertEquals(stagingPath, JobSubmissionFiles.getStagingDir(cluster, conf, user));

    // Staging directory owner full principal name in upper and lower case
    stagingDirOwner = USER_1;
    fileStatus = new FileStatus(1, true, 1, 1, 100L, 100L,
        FsPermission.getDefault(), stagingDirOwner, stagingDirOwner, stagingPath);
    when(fs.getFileStatus(stagingPath)).thenReturn(fileStatus);
    assertEquals(stagingPath, JobSubmissionFiles.getStagingDir(cluster, conf, user));
}
/**
 * Scans the immediate children of {@code path} (depth 1) and converts every
 * entry whose file name starts with "veslot" into a {@link Device}.
 */
public Set<Device> getDevicesFromPath(String path) throws IOException {
    MutableInt slotCounter = new MutableInt(0);
    try (Stream<Path> entries = Files.walk(Paths.get(path), 1)) {
        return entries
                .filter(entry -> entry.toFile().getName().startsWith("veslot"))
                .map(entry -> toDevice(entry, slotCounter))
                .collect(Collectors.toSet());
    }
}
@Test
public void testNegativeDeviceStateNumber() throws IOException {
    // A negative os_state value must be surfaced as "Unknown (-1)" and mark
    // the device unhealthy, while still discovering the device itself.
    createVeSlotFile(0);
    createOsStateFile(-1);
    when(mockCommandExecutor.getOutput())
        .thenReturn("8:1:character special file");
    when(udevUtil.getSysPath(anyInt(), anyChar())).thenReturn(testFolder);
    Set<Device> devices = discoverer.getDevicesFromPath(testFolder);
    assertEquals("Number of devices", 1, devices.size());
    Device device = devices.iterator().next();
    assertEquals("Device ID", 0, device.getId());
    assertEquals("Major number", 8, device.getMajorNumber());
    assertEquals("Minor number", 1, device.getMinorNumber());
    assertEquals("Status", "Unknown (-1)", device.getStatus());
    assertFalse("Device should not be healthy", device.isHealthy());
}
/**
 * Deletes all data of a workflow in batches, progressing through the ordered
 * deletion stages until DELETION_DONE or until {@code timeoutInNanos} elapses.
 * On timeout the current stage is persisted so a later run can resume; a
 * MaestroTimeoutException is thrown to trigger that retry.
 */
public void deleteWorkflowData(String workflowId, long internalId, long timeoutInNanos) {
    final long startNanos = System.nanoTime();
    // Resume from whatever stage a previous (timed-out) run reached.
    Stage stage = getWorkflowDeletionStage(workflowId, internalId);
    int totalDeleted = 0;
    int deleted = 0; // rows deleted within the current stage only
    long stageTime = startNanos;
    Timeline timeline = new Timeline(null);
    while (stage != Stage.DELETION_DONE) {
        int cnt = deleteDataForStage(workflowId, internalId, stage);
        // Throttle between batches to limit DB pressure; an interrupt here is
        // deliberately ignored and logged.
        TimeUtils.sleep(BATCH_DELAY_IN_MILLIS)
            .ifPresent(
                details ->
                    LOG.info(
                        "Thread is interrupted, ignore it and just continue without sleeping: {}",
                        details));
        totalDeleted += cnt;
        deleted += cnt;
        long curTime = System.nanoTime();
        // A short batch (below the limit) means the stage is drained; also
        // check the overall timeout on every batch boundary.
        if (cnt < Constants.BATCH_DELETION_LIMIT || curTime - startNanos >= timeoutInNanos) {
            TimelineEvent event =
                TimelineLogEvent.info(
                    "Deleted [%s] items in the stage of [%s], taking [%s] millis",
                    deleted, stage.name(), TimeUnit.NANOSECONDS.toMillis(curTime - stageTime));
            LOG.info(event.getMessage());
            timeline.add(event);
            if (cnt < Constants.BATCH_DELETION_LIMIT) {
                deleted = 0;
                stage = DELETION_STAGES[stage.ordinal() + 1]; // move to the next stage
                stageTime = curTime;
            }
            if (curTime - startNanos >= timeoutInNanos) {
                break;
            }
        }
    }
    LOG.info(
        "Totally deleted {} rows while deleting workflow [{}][{}] and then "
            + "complete this round of deletion at stage [{}], taking [{}] millis.",
        totalDeleted,
        workflowId,
        internalId,
        stage.name(),
        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
    // Persist the reached stage so progress survives this process.
    int updated = updateWorkflowDeletionStage(workflowId, internalId, stage, timeline);
    if (updated != SUCCESS_WRITE_SIZE) {
        throw new MaestroRetryableError(
            "Failed to update workflow deletion data for workflow [%s][%s], retry it later",
            workflowId, internalId);
    }
    if (stage != Stage.DELETION_DONE) {
        LOG.info(
            "Workflow [{}][{}] deletion is timed out and will retry it later", workflowId, internalId);
        throw new MaestroTimeoutException(
            "Workflow [%s][%s] deletion is timed out", workflowId, internalId);
    }
}
@Test
public void testDeleteWorkflowData() throws Exception {
    // End-to-end: deleting a workflow publishes a DeleteWorkflowJobEvent,
    // initializes deletion, and deleteWorkflowData then fully clears it.
    WorkflowDefinition wfd = loadWorkflow(TEST_WORKFLOW_ID1);
    workflowDao.addWorkflowDefinition(wfd, wfd.getPropertiesSnapshot().extractProperties());
    reset(publisher);
    ArgumentCaptor<DeleteWorkflowJobEvent> argumentCaptor =
        ArgumentCaptor.forClass(DeleteWorkflowJobEvent.class);
    workflowDao.deleteWorkflow(TEST_WORKFLOW_ID1, User.create("tester"));
    Mockito.verify(publisher, times(1)).publishOrThrow(argumentCaptor.capture(), any());
    DeleteWorkflowJobEvent deleteWorkflowJobEvent = argumentCaptor.getValue();
    assertEquals(TEST_WORKFLOW_ID1, deleteWorkflowJobEvent.getWorkflowId());
    assertEquals("tester", deleteWorkflowJobEvent.getAuthor().getName());
    assertTrue(
        deletionDao.isDeletionInitialized(
            TEST_WORKFLOW_ID1, deleteWorkflowJobEvent.getInternalId()));
    // Run the actual batched deletion with a generous 1-minute budget.
    deletionDao.deleteWorkflowData(
        TEST_WORKFLOW_ID1, deleteWorkflowJobEvent.getInternalId(), TimeUnit.MINUTES.toNanos(1));
    assertFalse(
        deletionDao.isDeletionInitialized(
            TEST_WORKFLOW_ID1, deleteWorkflowJobEvent.getInternalId()));
    assertFalse(deletionDao.isDeletionInProgress(TEST_WORKFLOW_ID1));
}
/**
 * Two-argument variant: delegates to the three-argument overload with
 * capture-group 0, i.e. the entire match.
 */
@Udf(description = "Returns all substrings of the input that matches the given regex pattern")
public List<String> regexpExtractAll(
    @UdfParameter(description = "The regex pattern") final String pattern,
    @UdfParameter(description = "The input string to apply regex on") final String input
) {
    return regexpExtractAll(pattern, input, 0);
}
@Test
public void shouldReturnEmptyWhenNoMatch() {
    // A pattern that never matches must yield an empty list, not null.
    assertThat(udf.regexpExtractAll("tst", "test string"), empty());
}
/**
 * Batch-registers instances under the given service/group: validates the
 * instances, strips any redundant group-name prefix from each instance's
 * service name, then delegates the registration to the client proxy.
 */
@Override
public void batchRegisterInstance(String serviceName, String groupName, List<Instance> instances)
        throws NacosException {
    NamingUtils.batchCheckInstanceIsLegal(instances);
    batchCheckAndStripGroupNamePrefix(instances, groupName);
    clientProxy.batchRegisterService(serviceName, groupName, instances);
}
@Test
void testBatchRegisterInstanceWithGroupNamePrefix() throws NacosException {
    // An instance whose serviceName carries the "group@@service" prefix must
    // still be forwarded to the proxy under the plain service/group pair.
    Instance instance = new Instance();
    String serviceName = "service1";
    String ip = "1.1.1.1";
    int port = 10000;
    instance.setServiceName(Constants.DEFAULT_GROUP + "@@" + serviceName);
    instance.setEphemeral(true);
    instance.setPort(port);
    instance.setIp(ip);
    List<Instance> instanceList = new ArrayList<>();
    instanceList.add(instance);
    //when
    client.batchRegisterInstance(serviceName, Constants.DEFAULT_GROUP, instanceList);
    //then
    verify(proxy, times(1)).batchRegisterService(eq(serviceName), eq(Constants.DEFAULT_GROUP),
            argThat(instances -> CollectionUtils.isEqualCollection(instanceList, instances)));
}
/** Returns the clusters of the given topology via the default-topology view. */
@Override
public Set<TopologyCluster> getClusters(Topology topology) {
    return defaultTopology(topology).getClusters();
}
@Test
public void testGetClusters() {
    // The fixture topology is built so that it contains exactly two clusters.
    VirtualNetwork virtualNetwork = setupVirtualNetworkTopology();
    TopologyService topologyService = manager.get(virtualNetwork.id(), TopologyService.class);
    Topology topology = topologyService.currentTopology();

    // test the getClusters() method.
    assertNotNull("The clusters should not be null.", topologyService.getClusters(topology));
    assertEquals("The clusters size did not match.", 2, topologyService.getClusters(topology).size());
}
/**
 * Serializes as: [timestamp bytes][1-byte join-side flag][key bytes].
 * The flag is 1 for the left side and 0 for the right side.
 */
@Override
public byte[] serialize(final String topic, final TimestampedKeyAndJoinSide<K> data) {
    final byte joinSideFlag = data.isLeftSide() ? (byte) 1 : (byte) 0;
    final byte[] keyBytes = keySerializer.serialize(topic, data.getKey());
    final byte[] timestampBytes = timestampSerializer.serialize(topic, data.getTimestamp());

    final ByteBuffer buf = ByteBuffer.allocate(timestampBytes.length + 1 + keyBytes.length);
    buf.put(timestampBytes);
    buf.put(joinSideFlag);
    buf.put(keyBytes);
    return buf.array();
}
@Test
public void shouldSerializeKeyWithJoinSideAsFalse() {
    // Round-trip a right-side (flag = 0) value through the serde and verify
    // the deserialized object equals the original.
    final String value = "some-string";

    final TimestampedKeyAndJoinSide<String> timestampedKeyAndJoinSide =
        TimestampedKeyAndJoinSide.makeRight(value, 20);

    final byte[] serialized =
        STRING_SERDE.serializer().serialize(TOPIC, timestampedKeyAndJoinSide);

    assertThat(serialized, is(notNullValue()));

    final TimestampedKeyAndJoinSide<String> deserialized =
        STRING_SERDE.deserializer().deserialize(TOPIC, serialized);

    assertThat(deserialized, is(timestampedKeyAndJoinSide));
}
/** Returns true if the given service cluster's type is STORAGE. */
public static boolean isStorage(ServiceCluster cluster) {
    return ServiceType.STORAGE.equals(cluster.serviceType());
}
@Test
public void verifyStorageClusterIsRecognized() {
    ServiceCluster cluster = createServiceCluster(ServiceType.STORAGE);
    assertTrue(VespaModelUtil.isStorage(cluster));
    // NOTE(review): both assertions exercise ServiceType.STORAGE; the second
    // one was possibly intended to use a different service type — confirm.
    cluster = createServiceCluster(ServiceType.STORAGE);
    assertTrue(VespaModelUtil.isStorage(cluster));
}
/**
 * Builds a regex that matches exactly the given URL and nothing else: the URL
 * is quoted as a literal (\Q...\E) and anchored at both ends.
 */
public String createRegexForUrl(String url) {
    return String.format("^%s$", Pattern.quote(url));
}
@Test
public void create() {
    // The generated regex must be the quoted, anchored form of the URL and
    // must actually match the original URL when compiled.
    String url = "https://example.com/api/lookup";
    String expected = "^\\Qhttps://example.com/api/lookup\\E$";
    String got = regexHelper.createRegexForUrl(url);
    assertThat(got).isEqualTo(expected);
    Pattern compiled = Pattern.compile(got, Pattern.DOTALL);
    assertThat(compiled.matcher(url).find()).isTrue();
}
/**
 * Maps a provider file name (e.g. "s3://bucket/rest/path") back into the
 * connection's PVFS namespace (e.g. "pvfs://my-connection/rest/path").
 *
 * @throws IllegalArgumentException if the provider uri is not under the
 *         connection's root provider uri
 */
@NonNull
@Override
public ConnectionFileName toPvfsFileName( @NonNull FileName providerFileName, @NonNull T details )
  throws KettleException {

    // Determine the part of provider file name following the connection "root".
    // Use the transformer to generate the connection root provider uri.
    // Both uris are assumed to be normalized.
    // Examples:
    // - connectionRootProviderUri: "hcp://domain.my:443/root/path/" | "s3://" | "local://"
    // - providerUri: "hcp://domain.my:443/root/path/rest/path" | "s3://rest/path"
    String connectionRootProviderUri = getConnectionRootProviderUriPrefix( details );
    String providerUri = providerFileName.getURI();

    // The provider uri must live under the connection root, otherwise it
    // cannot be mapped back into this connection's namespace.
    if ( !connectionFileNameUtils.isDescendantOrSelf( providerUri, connectionRootProviderUri ) ) {
      throw new IllegalArgumentException(
        String.format( "Provider file name '%s' is not a descendant of the connection root '%s'.",
          providerUri, connectionRootProviderUri ) );
    }

    // Examples: "/rest/path" or "rest/path"
    String restUriPath = providerUri.substring( connectionRootProviderUri.length() );

    // Example result: "pvfs://my-connection/rest/path"
    return buildPvfsFileName( details, restUriPath, providerFileName.getType() );
}
@Test
public void testToPvfsFileNameHandlesConnectionsWithRootPath() throws Exception {
    // Connections with a root path must have that root stripped from the
    // provider uri before building the pvfs:// name.

    // Local (always with root path)
    mockDetailsWithRootPath( details1, "my/root/path" );

    String connectionRootProviderUriPrefix = "scheme1://my/root/path";
    String restPath = "/rest/path";

    FileName providerFileName =
      mockFileNameWithUri( FileName.class, connectionRootProviderUriPrefix + restPath );

    ConnectionFileName pvfsFileName = transformer.toPvfsFileName( providerFileName, details1 );

    assertEquals( "pvfs://connection-name1" + restPath, pvfsFileName.getURI() );

    // Should do connection root provider uri normalization.
    verify( kettleVFS, times( 1 ) ).resolveURI( connectionRootProviderUriPrefix );
}
/**
 * Returns the textual local IP address, or null when the local socket address
 * is unresolved (its InetAddress is null).
 */
public String getLocalAddr() {
    final InetSocketAddress local = localAddress();
    return local.getAddress() == null
            ? null
            : local.getAddress().getHostAddress();
}
@Test
void requireThatUnresolvableRemoteAddressesAreSupported() {
    // A host that does not resolve yields a socket address with a null
    // InetAddress; getLocalAddr() must then return null instead of throwing.
    URI uri = URI.create("http://doesnotresolve.zzz:8080/test");
    HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
    DiscFilterRequest request = new DiscFilterRequest(httpReq);
    assertNull(request.getLocalAddr());
}
/**
 * Telnet "invoke" command: parses "service.method(jsonArgs)", resolves the
 * target provider and method (possibly via a prior "select" choice), invokes
 * it reflectively and renders the JSON result plus elapsed time.
 */
@Override
public String execute(CommandContext commandContext, String[] args) {
    if (ArrayUtils.isEmpty(args)) {
        return "Please input method name, eg: \r\ninvoke xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
                + "invoke XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
                + "invoke com.xxx.XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})";
    }
    Channel channel = commandContext.getRemote();
    // Default service comes from a prior "cd" command stored on the channel.
    String service = channel.attr(ChangeTelnet.SERVICE_KEY) != null
            ? channel.attr(ChangeTelnet.SERVICE_KEY).get()
            : null;
    String message = args[0];
    // The command must be of the form "name(...)".
    int i = message.indexOf("(");
    if (i < 0 || !message.endsWith(")")) {
        return "Invalid parameters, format: service.method(args)";
    }
    String method = message.substring(0, i).trim();
    String param = message.substring(i + 1, message.length() - 1).trim();
    // A dotted method name overrides the channel's default service.
    i = method.lastIndexOf(".");
    if (i >= 0) {
        service = method.substring(0, i).trim();
        method = method.substring(i + 1).trim();
    }
    if (StringUtils.isEmpty(service)) {
        return "If you want to invoke like [invoke sayHello(\"xxxx\")], please execute cd command first,"
                + " or you can execute it like [invoke IHelloService.sayHello(\"xxxx\")]";
    }
    // Parse the argument list as a JSON array.
    List<Object> list;
    try {
        list = JsonUtils.toJavaList("[" + param + "]", Object.class);
    } catch (Throwable t) {
        return "Invalid json argument, cause: " + t.getMessage();
    }
    StringBuilder buf = new StringBuilder();
    Method invokeMethod = null;
    ProviderModel selectedProvider = null;
    if (isInvokedSelectCommand(channel)) {
        // A previous ambiguous invoke was disambiguated via "select":
        // reuse the provider and method stashed on the channel.
        selectedProvider = channel.attr(INVOKE_METHOD_PROVIDER_KEY).get();
        invokeMethod = channel.attr(SelectTelnet.SELECT_METHOD_KEY).get();
    } else {
        // Find the matching provider, then the method matching the argument list.
        for (ProviderModel provider : frameworkModel.getServiceRepository().allProviderModels()) {
            if (!isServiceMatch(service, provider)) {
                continue;
            }
            selectedProvider = provider;
            List<Method> methodList = findSameSignatureMethod(provider.getAllMethods(), method, list);
            if (CollectionUtils.isEmpty(methodList)) {
                break;
            }
            if (methodList.size() == 1) {
                invokeMethod = methodList.get(0);
            } else {
                List<Method> matchMethods = findMatchMethods(methodList, list);
                if (CollectionUtils.isEmpty(matchMethods)) {
                    break;
                }
                if (matchMethods.size() == 1) {
                    invokeMethod = matchMethods.get(0);
                } else {
                    // exist overridden method: stash candidates on the channel
                    // and ask the user to run "select" to pick one.
                    channel.attr(INVOKE_METHOD_PROVIDER_KEY).set(provider);
                    channel.attr(INVOKE_METHOD_LIST_KEY).set(matchMethods);
                    channel.attr(INVOKE_MESSAGE_KEY).set(message);
                    printSelectMessage(buf, matchMethods);
                    return buf.toString();
                }
            }
            break;
        }
    }
    if (!StringUtils.isEmpty(service)) {
        buf.append("Use default service ").append(service).append('.');
    }
    if (selectedProvider == null) {
        buf.append("\r\nNo such service ").append(service);
        return buf.toString();
    }
    if (invokeMethod == null) {
        buf.append("\r\nNo such method ")
                .append(method)
                .append(" in service ")
                .append(service);
        return buf.toString();
    }
    try {
        // Convert the raw JSON values into the method's declared parameter types.
        Object[] array = realize(list.toArray(), invokeMethod.getParameterTypes(), invokeMethod.getGenericParameterTypes());
        long start = System.currentTimeMillis();
        AppResponse result = new AppResponse();
        try {
            Object o = invokeMethod.invoke(selectedProvider.getServiceInstance(), array);
            boolean setValueDone = false;
            // If the provider started an async context, wait for its internal
            // future instead of using the (placeholder) sync return value.
            if (RpcContext.getServerAttachment().isAsyncStarted()) {
                AsyncContext asyncContext = RpcContext.getServerAttachment().getAsyncContext();
                if (asyncContext instanceof AsyncContextImpl) {
                    CompletableFuture<Object> internalFuture = ((AsyncContextImpl) asyncContext).getInternalFuture();
                    result.setValue(internalFuture.get());
                    setValueDone = true;
                }
            }
            if (!setValueDone) {
                result.setValue(o);
            }
        } catch (Throwable t) {
            result.setException(t);
            // Re-assert the interrupt flag; it was swallowed by the catch.
            if (t instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
        } finally {
            RpcContext.removeContext();
        }
        long end = System.currentTimeMillis();
        buf.append("\r\nresult: ");
        buf.append(JsonUtils.toJson(result.recreate()));
        buf.append("\r\nelapsed: ");
        buf.append(end - start);
        buf.append(" ms.");
    } catch (Throwable t) {
        return "Failed to invoke method " + invokeMethod.getName() + ", cause: " + StringUtils.toString(t);
    }
    return buf.toString();
}
@Test
void testInvokeDefaultService() throws RemotingException {
    // With a default service set via the channel's SERVICE_KEY (i.e. after a
    // "cd"), "invoke echo(...)" must resolve and call that service's method.
    defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(DemoService.class.getName());
    defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).set(null);
    given(mockChannel.attr(ChangeTelnet.SERVICE_KEY))
        .willReturn(defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY));
    given(mockChannel.attr(SelectTelnet.SELECT_KEY)).willReturn(defaultAttributeMap.attr(SelectTelnet.SELECT_KEY));
    registerProvider(DemoService.class.getName(), new DemoServiceImpl(), DemoService.class);
    String result = invoke.execute(mockCommandContext, new String[] {"echo(\"ok\")"});
    assertTrue(result.contains("result: \"ok\""));
    // Clean up channel attributes so other tests start fresh.
    defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).remove();
    defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).remove();
}
PdlBuilder writeProperties(List<String> prefix, Map<String, Object> properties) throws IOException { List<Map.Entry<String, Object>> orderedProperties = properties.entrySet().stream() .sorted(Map.Entry.comparingByKey()) .collect(Collectors.toList()); for (Map.Entry<String, Object> entry : orderedProperties) { String key = entry.getKey(); Object value = entry.getValue(); // Copy the prefix path segments and append the current segment ArrayList<String> pathParts = new ArrayList<>(prefix); pathParts.add(key); if (value instanceof DataMap) { DataMap dm = (DataMap) value; // Decide encoding style based on map branches/size if (dm.size() == 1) { // encode value property like @x.y.z = "value" writeProperties(pathParts, dm); } else { // encode value property like @x = { "y": { "z": "value" } } writeProperty(pathParts, dm); } } else if (Boolean.TRUE.equals(value)) { // Use shorthand for boolean true. Instead of writing "@deprecated = true", // write "@deprecated". indent().write("@").writePath(pathParts).newline(); } else { writeProperty(pathParts, value); } } return this; }
@Test(dataProvider = "propertiesMapProvider")
public void testWriteProperties(Map<String, Object> properties, String indentPdlString, String compactPdlString)
    throws IOException {
    // Render the same properties through both builder flavors.
    StringWriter indentWriter = new StringWriter();
    PdlBuilder indentPdlBuilder = (new IndentedPdlBuilder.Provider()).newInstance(indentWriter);
    indentPdlBuilder.writeProperties(Collections.emptyList(), properties);

    StringWriter compactWriter = new StringWriter();
    PdlBuilder compactPdlBuilder = (new CompactPdlBuilder.Provider()).newInstance(compactWriter);
    compactPdlBuilder.writeProperties(Collections.emptyList(), properties);

    // TestNG's assertEquals signature is (actual, expected); the arguments
    // were previously swapped, which mislabels values in failure messages.
    Assert.assertEquals(indentWriter.toString(), indentPdlString);
    Assert.assertEquals(compactWriter.toString(), compactPdlString);
}
/**
 * Applies the given {@link OpenAPISpecFilter} to the spec and returns a
 * filtered deep-ish clone: paths, webhooks, tags and component schemas are
 * filtered; other top-level sections are copied from the filtered spec.
 * Returns null when the filter rejects the whole spec.
 */
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
    OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
    if (filteredOpenAPI == null) {
        return filteredOpenAPI;
    }

    OpenAPI clone = new OpenAPI();
    clone.info(filteredOpenAPI.getInfo());
    clone.openapi(filteredOpenAPI.getOpenapi());
    clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
    clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
    clone.setExtensions(filteredOpenAPI.getExtensions());
    clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
    clone.setSecurity(filteredOpenAPI.getSecurity());
    clone.setServers(filteredOpenAPI.getServers());
    // FIX: copy tags from the *filtered* spec; previously they were copied
    // from the unfiltered input, resurrecting tags a filter had removed.
    clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(filteredOpenAPI.getTags()));

    final Set<String> allowedTags = new HashSet<>();
    final Set<String> filteredTags = new HashSet<>();

    Paths clonedPaths = new Paths();
    if (filteredOpenAPI.getPaths() != null) {
        for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
            PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);

            // Only keep path items that still have at least one operation.
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clonedPaths.addPathItem(resourcePath, clonedPathItem);
                }
            }
        }
        clone.paths(clonedPaths);
    }

    // Remove tags that only occurred on filtered-out operations.
    filteredTags.removeAll(allowedTags);
    final List<Tag> tags = clone.getTags();
    if (tags != null && !filteredTags.isEmpty()) {
        tags.removeIf(tag -> filteredTags.contains(tag.getName()));
        if (clone.getTags().isEmpty()) {
            clone.setTags(null);
        }
    }

    if (filteredOpenAPI.getWebhooks() != null) {
        for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
            // FIX: read the item from the webhooks map. It was mistakenly read
            // from getPaths(), which may be null (NPE) or hold a different item.
            PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);

            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clone.addWebhooks(resourcePath, clonedPathItem);
                }
            }
        }
    }

    if (filteredOpenAPI.getComponents() != null) {
        clone.components(new Components());
        clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
        clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
        clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
        clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
        clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
        clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
        clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
        clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
        clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
        clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
        clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
    }

    if (filter.isRemovingUnreferencedDefinitions()) {
        clone = removeBrokenReferenceDefinitions(clone);
    }

    return clone;
}
@Test(description = "it should retain non-broken reference model composed properties")
public void retainNonBrokenReferenceModelComposedProperties() throws IOException {
    final OpenAPI openAPI = getOpenAPI(RESOURCE_REFERRED_SCHEMAS);
    assertNotNull(openAPI.getComponents().getSchemas().get("User"));

    // A no-op filter must keep referenced schemas untouched.
    final NoOpOperationsFilter noOperationsFilter = new NoOpOperationsFilter();
    OpenAPI filtered = new SpecFilter().filter(openAPI, noOperationsFilter, null, null, null);
    assertNotNull(filtered.getComponents().getSchemas().get("User"));

    // Even with unreferenced-definition removal enabled, schemas that are
    // still (transitively) referenced must survive.
    final RemoveUnreferencedDefinitionsFilter refFilter = new RemoveUnreferencedDefinitionsFilter();
    filtered = new SpecFilter().filter(openAPI, refFilter, null, null, null);
    assertNotNull(filtered.getComponents().getSchemas().get("User"));
    assertNotNull(filtered.getComponents().getSchemas().get("Pet"));
}
/**
 * Returns a sink that updates the named map by applying an entry processor per
 * incoming item: {@code toKeyFn} derives the map key from the item, and
 * {@code toEntryProcessorFn} builds the processor applied to that entry.
 * Thin wrapper over {@code mapEntryProcessorBuilder}.
 */
@Nonnull
public static <E, K, V, R> Sink<E> mapWithEntryProcessor(
        @Nonnull String mapName,
        @Nonnull FunctionEx<? super E, ? extends K> toKeyFn,
        @Nonnull FunctionEx<? super E, ? extends EntryProcessor<K, V, R>> toEntryProcessorFn
) {
    return Sinks.<E, K, V, R>mapEntryProcessorBuilder(mapName)
            .toKeyFn(toKeyFn)
            .toEntryProcessorFn(toEntryProcessorFn)
            .build();
}
@Test
@Category(SlowTest.class)
public void mapWithEntryProcessor_testBackpressure() {
    /*
    NOTE TO THE TEST
    This test tries to test that when a permit for async op is denied, the
    processor correctly yields. We don't assert that it actually happened,
    nor that the backpressure was applied to the upstream. We only try to
    simulate a slow sink (using the SleepingEntryProcessor) and check that
    the results are correct.
    */
    String targetMap = randomMapName();
    List<Integer> input = sequence(5_001);
    p.readFrom(TestSources.items(input))
        .writeTo(Sinks.mapWithEntryProcessor(targetMap, FunctionEx.identity(), SleepingEntryProcessor::new));
    execute();
    // Every input item must have landed in the map exactly once (identity key/value).
    Map<Integer, Integer> actual = new HashMap<>(hz().getMap(targetMap));
    Map<Integer, Integer> expected = input.stream()
        .collect(toMap(Function.identity(), Function.identity(), Integer::sum));
    assertEquals(expected, actual);
}
/**
 * Reconciles the plugin-risk-consent global property at startup:
 * - external plugins installed but consent never accepted -> flip the
 *   property to REQUIRED and warn in sonar.log;
 * - no external plugins but consent still REQUIRED -> drop the stale property.
 */
@Override
public void start() {
    // True when at least one installed plugin was provisioned externally.
    boolean hasExternalPlugins = pluginRepository.getPlugins().stream().anyMatch(plugin -> plugin.getType().equals(PluginType.EXTERNAL));
    try (DbSession session = dbClient.openSession(false)) {
        // Fall back to the default consent property when none is stored yet.
        PropertyDto property = Optional.ofNullable(dbClient.propertiesDao().selectGlobalProperty(session, PLUGINS_RISK_CONSENT))
            .orElse(defaultPluginRiskConsentProperty());
        if (hasExternalPlugins && NOT_ACCEPTED == PluginRiskConsent.valueOf(property.getValue())) {
            // Consent is now required: persist REQUIRED and warn the operator.
            addWarningInSonarDotLog();
            property.setValue(REQUIRED.name());
            dbClient.propertiesDao().saveProperty(session, property);
            session.commit();
        } else if (!hasExternalPlugins && REQUIRED == PluginRiskConsent.valueOf(property.getValue())) {
            // No external plugins anymore: the pending requirement is obsolete.
            dbClient.propertiesDao().deleteGlobalProperty(PLUGINS_RISK_CONSENT, session);
            session.commit();
        }
    }
}
@Test
public void consent_should_be_not_accepted_when_there_is_no_external_plugin_and_never_been_accepted() {
    // Given: consent was previously forced to REQUIRED, but only a bundled plugin remains.
    setupExternalPluginConsent(REQUIRED);
    setupBundledPlugin();

    // When
    underTest.start();

    // Then: the now-irrelevant consent property has been deleted.
    assertThat(dbClient.propertiesDao().selectGlobalProperty(PLUGINS_RISK_CONSENT)).isNull();
}
/**
 * Returns the name this plugin is registered under.
 *
 * @return the Motan plugin name taken from {@link PluginEnum}
 */
@Override
public String named() {
    return PluginEnum.MOTAN.getName();
}
@Test
public void testNamed() {
    // JUnit convention is assertEquals(expected, actual); the original call had
    // the arguments reversed, which produces a misleading failure message.
    Assertions.assertEquals("motan", motanPlugin.named());
}
/**
 * Executes an ASSERT SCHEMA statement: verifies (with the configured default
 * timeout) that a schema identified by subject and/or id exists — or does not
 * exist, depending on the statement — in the schema registry.
 *
 * @param statement        the configured ASSERT SCHEMA statement
 * @param sessionProperties session properties (unused here, part of the executor contract)
 * @param executionContext  supplies the KSQL config (timeout)
 * @param serviceContext    supplies the schema registry client
 * @return the executor response carrying an {@code AssertSchemaEntity} on success
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<AssertSchema> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  return AssertExecutor.execute(
      statement.getMaskedStatementText(),
      statement.getStatement(),
      executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
      serviceContext,
      // Performs the actual existence check against the registry.
      (stmt, sc) -> assertSchema(
          sc.getSchemaRegistryClient(),
          ((AssertSchema) stmt).getSubject(),
          ((AssertSchema) stmt).getId(),
          stmt.checkExists()),
      // Builds the entity returned to the client on success.
      (str, stmt) -> new AssertSchemaEntity(
          str,
          ((AssertSchema) stmt).getSubject(),
          ((AssertSchema) stmt).getId(),
          stmt.checkExists())
  );
}
@Test public void shouldAssertNotExistSchemaById() { // Given final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.empty(), Optional.of(100), Optional.empty(), false); final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement .of(KsqlParser.PreparedStatement.of("", assertSchema), SessionConfig.of(ksqlConfig, ImmutableMap.of())); // When: final Optional<KsqlEntity> entity = AssertSchemaExecutor .execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity(); // Then: assertThat("expected response!", entity.isPresent()); assertThat(((AssertSchemaEntity) entity.get()).getSubject(), is(Optional.empty())); assertThat(((AssertSchemaEntity) entity.get()).getId(), is(Optional.of(100))); assertThat(((AssertSchemaEntity) entity.get()).getExists(), is(false)); }
/**
 * Moves replicas to new log directories on their respective brokers.
 * <p>
 * Requests are grouped per broker — each broker receives a single
 * AlterReplicaLogDirsRequest covering all of its replicas — while every
 * replica gets its own future so callers observe per-replica outcomes.
 */
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment,
                                                     final AlterReplicaLogDirsOptions options) {
    // One future per requested replica; completed from broker responses below.
    final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
    for (TopicPartitionReplica replica : replicaAssignment.keySet())
        futures.put(replica, new KafkaFutureImpl<>());

    // Build one request payload per broker, grouping replicas by log dir and topic.
    Map<Integer, AlterReplicaLogDirsRequestData> replicaAssignmentByBroker = new HashMap<>();
    for (Map.Entry<TopicPartitionReplica, String> entry: replicaAssignment.entrySet()) {
        TopicPartitionReplica replica = entry.getKey();
        String logDir = entry.getValue();
        int brokerId = replica.brokerId();
        AlterReplicaLogDirsRequestData value = replicaAssignmentByBroker.computeIfAbsent(brokerId,
            key -> new AlterReplicaLogDirsRequestData());

        // Find (or lazily create) the entry for this log dir in the broker's request.
        AlterReplicaLogDir alterReplicaLogDir = value.dirs().find(logDir);
        if (alterReplicaLogDir == null) {
            alterReplicaLogDir = new AlterReplicaLogDir();
            alterReplicaLogDir.setPath(logDir);
            value.dirs().add(alterReplicaLogDir);
        }

        // Find (or lazily create) the topic entry under that log dir, then record the partition.
        AlterReplicaLogDirTopic alterReplicaLogDirTopic = alterReplicaLogDir.topics().find(replica.topic());
        if (alterReplicaLogDirTopic == null) {
            alterReplicaLogDirTopic = new AlterReplicaLogDirTopic().setName(replica.topic());
            alterReplicaLogDir.topics().add(alterReplicaLogDirTopic);
        }
        alterReplicaLogDirTopic.partitions().add(replica.partition());
    }

    final long now = time.milliseconds();
    // Fire one call per broker; each call completes only the futures of its own replicas.
    for (Map.Entry<Integer, AlterReplicaLogDirsRequestData> entry: replicaAssignmentByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final AlterReplicaLogDirsRequestData assignment = entry.getValue();

        runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) {
                return new AlterReplicaLogDirsRequest.Builder(assignment);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
                for (AlterReplicaLogDirTopicResult topicResult: response.data().results()) {
                    for (AlterReplicaLogDirPartitionResult partitionResult: topicResult.partitions()) {
                        TopicPartitionReplica replica = new TopicPartitionReplica(
                            topicResult.topicName(), partitionResult.partitionIndex(), brokerId);
                        KafkaFutureImpl<Void> future = futures.get(replica);
                        if (future == null) {
                            // Unsolicited result: log and skip rather than fail the whole call.
                            log.warn("The partition {} in the response from broker {} is not in the request",
                                new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()),
                                brokerId);
                        } else if (partitionResult.errorCode() == Errors.NONE.code()) {
                            future.complete(null);
                        } else {
                            future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
                        }
                    }
                }
                // The server should send back a response for every replica. But do a sanity check anyway.
                completeUnrealizedFutures(
                    futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId),
                    replica -> "The response from broker " + brokerId +
                        " did not contain a result for replica " + replica);
            }

            @Override
            void handleFailure(Throwable throwable) {
                // Only completes the futures of brokerId
                completeAllExceptionally(
                    futures.entrySet().stream()
                        .filter(entry -> entry.getKey().brokerId() == brokerId)
                        .map(Map.Entry::getValue),
                    throwable);
            }
        }, now);
    }

    return new AlterReplicaLogDirsResult(new HashMap<>(futures));
}
@Test
public void testAlterReplicaLogDirsPartialResponse() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Broker 0 will answer for only one of the two requested replicas.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 1);

        TopicPartitionReplica answered = new TopicPartitionReplica("topic", 1, 0);
        TopicPartitionReplica unanswered = new TopicPartitionReplica("topic", 2, 0);
        Map<TopicPartitionReplica, String> assignment = new HashMap<>();
        assignment.put(answered, "/data1");
        assignment.put(unanswered, "/data1");

        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(assignment);

        // The answered replica succeeds; the missing one fails via the sanity check.
        assertNull(result.values().get(answered).get());
        TestUtils.assertFutureThrows(result.values().get(unanswered), ApiException.class);
    }
}
/**
 * Returns a copy of this backoff with the given cap on total (cumulative)
 * backoff time.
 *
 * @param maxCumulativeBackoff cap on total backoff; must be at least 1 ms
 * @throws IllegalArgumentException if the duration is not positive
 */
public FluentBackoff withMaxCumulativeBackoff(Duration maxCumulativeBackoff) {
    if (!maxCumulativeBackoff.isLongerThan(Duration.ZERO)) {
        throw new IllegalArgumentException(
            "maxCumulativeBackoff " + maxCumulativeBackoff + " must be at least 1 millisecond");
    }
    // Immutable builder style: every "with" returns a new instance.
    return new FluentBackoff(
        exponent, initialBackoff, maxBackoff, maxCumulativeBackoff, maxRetries, throttledTimeCounter);
}
@Test
public void testInvalidCumulativeBackoff() {
    // A negative cumulative backoff must be rejected with a descriptive message.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("maxCumulativeBackoff PT-0.002S must be at least 1 millisecond");

    defaultBackoff.withMaxCumulativeBackoff(Duration.millis(-2));
}
/**
 * Generates an inclusive series from {@code start} to {@code end} with an
 * implicit step of +1 (ascending) or -1 (descending).
 */
@Udf
public List<Integer> generateSeriesInt(
    @UdfParameter(description = "The beginning of the series") final int start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final int end
) {
  // Compare directly rather than testing the sign of `end - start`: the
  // subtraction can overflow int for extreme start/end pairs and pick the
  // wrong direction. For end == start both forms yield step -1, so this is
  // behavior-compatible for all non-overflowing inputs.
  final int step = end > start ? 1 : -1;
  return generateSeriesInt(start, end, step);
}
@Test
public void shouldComputeIntRangeWithPositiveEvenStepInt() {
    // 0..9 (inclusive) with step 2 -> {0, 2, 4, 6, 8}
    final List<Integer> series = rangeUdf.generateSeriesInt(0, 9, 2);

    assertThat(series, hasSize(5));
    int expected = 0;
    for (final int actual : series) {
        assertThat(expected, is(actual));
        expected += 2;
    }
}
/** Buffers the incoming split and schedules asynchronous processing of the queue. */
@Override
public void processElement(StreamRecord<FlinkInputSplit> element) {
    final FlinkInputSplit split = element.getValue();
    splits.add(split);
    enqueueProcessSplits();
}
/**
 * Feeds each split to the reader operator one at a time and verifies that a
 * single mailbox step drains the whole split, producing exactly the records
 * committed for it.
 */
@TestTemplate
public void testProcessAllRecords() throws Exception {
    List<List<Record>> expectedRecords = generateRecordsAndCommitTxn(10);
    List<FlinkInputSplit> splits = generateSplits();
    assertThat(splits).hasSize(10);

    try (OneInputStreamOperatorTestHarness<FlinkInputSplit, RowData> harness = createReader()) {
        harness.setup();
        harness.open();

        SteppingMailboxProcessor processor = createLocalMailbox(harness);
        List<Record> expected = Lists.newArrayList();
        for (int i = 0; i < splits.size(); i++) {
            // Process this element to enqueue to mail-box.
            harness.processElement(splits.get(i), -1);

            // Run the mail-box once to read all records from the given split.
            assertThat(processor.runMailboxStep()).as("Should processed 1 split").isTrue();

            // Assert the output has expected elements.
            expected.addAll(expectedRecords.get(i));
            TestHelpers.assertRecords(readOutputValues(harness), expected, SCHEMA);
        }
    }
}
/**
 * Fetches all sessions for {@code key} on the given partition whose window
 * start/end fall within the supplied bounds.
 *
 * @throws MaterializationException if the state store lookup fails
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStart,
    final Range<Instant> windowEnd,
    final Optional<Position> position
) {
  try {
    final ReadOnlySessionStore<GenericKey, GenericRow> sessionStore =
        stateStore.store(QueryableStoreTypes.sessionStore(), partition);
    final Iterator<WindowedRow> rows =
        findSession(sessionStore, key, windowStart, windowEnd).iterator();
    return KsMaterializedQueryResult.rowIterator(rows);
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
@Test public void shouldIgnoreSessionsThatStartAfterUpperBound() { // Given: givenSingleSession(UPPER_INSTANT.plusMillis(1), UPPER_INSTANT.plusMillis(1)); // When: final Iterator<WindowedRow> rowIterator = table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS).rowIterator; // Then: assertThat(rowIterator.hasNext(), is(false)); }
/**
 * Creates a template engine with default settings, letting the factory pick
 * the first available implementation.
 */
public static TemplateEngine createEngine() {
    return TemplateFactory.create();
}
@Test public void createEngineTest() { // 字符串模板, 默认模板引擎,此处为Beetl TemplateEngine engine = TemplateUtil.createEngine(new TemplateConfig()); Template template = engine.getTemplate("hello,${name}"); String result = template.render(Dict.create().set("name", "hutool")); assertEquals("hello,hutool", result); // classpath中获取模板 engine = TemplateUtil.createEngine(new TemplateConfig("templates", ResourceMode.CLASSPATH)); Template template2 = engine.getTemplate("beetl_test.btl"); String result2 = template2.render(Dict.create().set("name", "hutool")); assertEquals("hello,hutool", result2); }
/**
 * Records the given values under the default recorder name at the current
 * monotonic time.
 *
 * @param values the values to record
 * @return the {@code LogAction} indicating whether this event should be logged
 */
public synchronized LogAction record(double... values) {
    return record(DEFAULT_RECORDER_NAME, timer.monotonicNow(), values);
}
@Test
public void testBasicLogging() {
    // The very first record is always logged.
    assertTrue(helper.record().shouldLog());

    // Records inside the suppression window are swallowed...
    for (int i = 0; i < 5; i++) {
        timer.advance(LOG_PERIOD / 10);
        assertFalse(helper.record().shouldLog());
    }

    // ...and logging resumes once a full period has elapsed.
    timer.advance(LOG_PERIOD);
    assertTrue(helper.record().shouldLog());
}
/**
 * Fetches the full INFO output from the given cluster node and exposes it as
 * {@link Properties}.
 */
@Override
public Properties info(RedisClusterNode node) {
    Map<String, String> rawInfo = execute(node, RedisCommands.INFO_ALL);
    Properties properties = new Properties();
    rawInfo.forEach(properties::setProperty);
    return properties;
}
@Test
public void testInfo() {
    // A live master should expose a non-trivial number of INFO properties.
    RedisClusterNode firstMaster = getFirstMaster();
    Properties nodeInfo = connection.info(firstMaster);
    assertThat(nodeInfo.size()).isGreaterThan(10);
}
/**
 * Checks whether the given table exists, delegating to the underlying Hive
 * metadata.
 */
@Override
public boolean tableExists(String dbName, String tblName) {
    return hiveMetadata.tableExists(dbName, tblName);
}
@Test
public void testTableExists(@Mocked HiveTable hiveTable) {
    // Expect the check to be delegated to the wrapped hive metadata at least once.
    new Expectations() {
        {
            hiveMetadata.tableExists("test_db", "test_tbl");
            result = true;
            minTimes = 1;
        }
    };
    boolean exists = unifiedMetadata.tableExists("test_db", "test_tbl");
    Assert.assertTrue(exists);
}
/**
 * @return the configured credentials file path, if one was set
 */
public Optional<String> getCredentialsFile() {
    return credentialsFile;
}
@Test
public void testExplicitPropertyMappingsWithCredentialsFile() {
    // Given: the credentials-file property set explicitly.
    Map<String, String> properties = ImmutableMap.of("bigquery.credentials-file", "cfile");

    // When: the config object is built from those properties.
    ConfigurationFactory configurationFactory = new ConfigurationFactory(properties);
    BigQueryConfig config = configurationFactory.build(BigQueryConfig.class);

    // Then: the value surfaces through the getter.
    assertEquals(config.getCredentialsFile(), Optional.of("cfile"));
}
/**
 * Removes all headers matching {@code HEADER_PATTERN} from the message.
 * NOTE(review): the return value is whatever {@code Message#removeHeaders}
 * reports — presumably whether anything was removed; confirm against the
 * Camel API.
 */
protected boolean clearMetricsHeaders(Message in) {
    return in.removeHeaders(HEADER_PATTERN);
}
@Test
public void testClearMetricsHeaders() {
    // Stub the removal to report success, then verify exactly one delegation.
    when(in.removeHeaders(HEADER_PATTERN)).thenReturn(true);

    assertThat(okProducer.clearMetricsHeaders(in), is(true));

    inOrder.verify(in, times(1)).removeHeaders(HEADER_PATTERN);
    inOrder.verifyNoMoreInteractions();
}
/**
 * Builds a {@code Statement} AST node from the parse tree, first extracting
 * the data sources the statement references.
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
@Test public void shouldHandleUnqualifiedSelectStarOnJoin() { // Given: final SingleStatementContext stmt = givenQuery("SELECT * FROM TEST1 JOIN TEST2 WITHIN 1 SECOND ON TEST1.ID = TEST2.ID;"); // When: final Query result = (Query) builder.buildStatement(stmt); // Then: assertThat(result.getSelect(), is(new Select(ImmutableList.of(new AllColumns(Optional.empty()))))); }
/**
 * Resolves a SpEL expression attached to an annotated method.
 * <p>
 * Supported forms, tried in order: {@code ${...}} property placeholders
 * (delegated to the embedded value resolver), method-based {@code #{...}}
 * expressions, and bean-referencing expressions (which additionally get a
 * bean resolver). Anything else is returned unchanged as a literal.
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    // Nothing to evaluate; hand the input straight back.
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }

    // ${...} placeholders: delegate to Spring's embedded value resolver, if present.
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }

    // Expressions over the intercepted method and its arguments.
    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        SpelRootObject root = new SpelRootObject(method, arguments);
        MethodBasedEvaluationContext context =
            new MethodBasedEvaluationContext(root, method, arguments, parameterNameDiscoverer);
        return (String) expressionParser.parseExpression(spelExpression).getValue(context);
    }

    // Bean-referencing expressions need a resolver backed by the bean factory.
    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        SpelRootObject root = new SpelRootObject(method, arguments);
        MethodBasedEvaluationContext context =
            new MethodBasedEvaluationContext(root, method, arguments, parameterNameDiscoverer);
        context.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
        return (String) expressionParser.parseExpression(spelExpression).getValue(context);
    }

    // Unrecognised form: treat as a literal value.
    return spelExpression;
}
@Test
public void testRootMethodName() throws Exception {
    // #root.methodName must evaluate to the name of the intercepted method.
    DefaultSpelResolverTest target = new DefaultSpelResolverTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);

    String resolved = sut.resolve(testMethod, new Object[]{}, "#root.methodName");

    assertThat(resolved).isEqualTo("testMethod");
}
/**
 * Parses forwarded/non-forwarded/read field annotation strings into the given
 * single-input semantic properties.
 * NOTE(review): the trailing {@code false} is the extra flag of the overload
 * this forwards to — its exact meaning should be confirmed against that
 * overload's declaration.
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
@Test
void testInvalidPojoField() {
    // A forwarded-fields annotation naming a non-existent POJO field must be rejected.
    String[] forwarded = {"invalidField"};
    SingleInputSemanticProperties properties = new SingleInputSemanticProperties();

    assertThatThrownBy(
            () ->
                    SemanticPropUtil.getSemanticPropsSingleFromString(
                            properties, forwarded, null, null, pojoType, threeIntTupleType))
            .isInstanceOf(InvalidSemanticAnnotationException.class);
}
/**
 * Creates a {@code UriTemplate} for the given template string and charset,
 * with the boolean flag of the constructor fixed to {@code true}.
 * NOTE(review): the flag's meaning (presumably "encode expanded values")
 * should be confirmed against the constructor's declaration.
 */
public static UriTemplate create(String template, Charset charset) {
    return new UriTemplate(template, true, charset);
}
@Test
void missingVariablesOnPathAreRemoved() {
    // Given: a template with one path variable and one query variable.
    UriTemplate uriTemplate =
        UriTemplate.create("https://www.example.com/{foo}/items?name={name}", Util.UTF_8);
    assertThat(uriTemplate.getVariables()).contains("foo", "name").hasSize(2);

    // When: only the query variable is supplied.
    Map<String, Object> variables = new LinkedHashMap<>();
    variables.put("name", "Albert");
    String expanded = uriTemplate.expand(variables);

    // Then: the unresolved path variable is dropped, leaving a still-valid URI.
    assertThat(expanded).isEqualToIgnoringCase("https://www.example.com//items?name=Albert");
    assertThat(URI.create(expanded)).isNotNull();
}
/**
 * Returns the elapsed time expressed in seconds.
 * NOTE(review): precision (whether fractional seconds are preserved) depends
 * on {@code elapsed(TimeUnit)} — confirm against its implementation.
 */
public double elapsedSeconds() {
    return elapsed(TimeUnit.SECONDS);
}
@Test
void elapsedSeconds() {
    // A fake clock ticking from t=1s (start) to t=3s (read) yields 2 elapsed seconds.
    Supplier<Long> clock = mock(Supplier.class);
    when(clock.get()).thenReturn(SECONDS.toNanos(1), SECONDS.toNanos(3));

    Timer timer = new Timer(clock);

    assertThat(timer.elapsedSeconds()).isEqualTo(2.0d);
}
/**
 * Prepares GPU isolation for a container before launch: assigns GPUs, creates
 * the container's devices cgroup and, for non-OCI (non-docker) containers,
 * invokes the container-executor to deny access to GPUs not allocated to it.
 *
 * @param container the container being started
 * @return privileged operations to run (adds the container PID to the devices
 *         cgroup), or {@code null} for OCI-compliant containers
 * @throws ResourceHandlerException if the privileged GPU operation fails
 */
@Override
public synchronized List<PrivilegedOperation> preStart(Container container)
    throws ResourceHandlerException {
  String containerIdStr = container.getContainerId().toString();

  // Assign Gpus to container if requested some.
  GpuResourceAllocator.GpuAllocation allocation = gpuAllocator.assignGpus(
      container);

  // Create device cgroups for the container
  cGroupsHandler.createCGroup(CGroupsHandler.CGroupController.DEVICES,
      containerIdStr);
  if (!OCIContainerRuntime.isOCICompliantContainerRequested(
      nmContext.getConf(), container.getLaunchContext().getEnvironment())) {
    // Write to devices cgroup only for non-docker container. The reason is
    // docker engine runtime runc do the devices cgroups initialize in the
    // pre-hook, see:
    // https://github.com/opencontainers/runc/blob/master/libcontainer/configs/device_defaults.go
    //
    // YARN by default runs docker container inside cgroup, if we setup cgroups
    // devices.deny for the parent cgroup for launched container, we can see
    // errors like: failed to write c *:* m to devices.allow:
    // write path-to-parent-cgroup/<container-id>/devices.allow:
    // operation not permitted.
    //
    // To avoid this happen, if docker is requested when container being
    // launched, we will not setup devices.deny for the container. Instead YARN
    // will pass --device parameter to docker engine.
    // See NvidiaDockerV1CommandPlugin.
    try {
      // Execute c-e to setup GPU isolation before launch the container
      PrivilegedOperation privilegedOperation = new PrivilegedOperation(
          PrivilegedOperation.OperationType.GPU,
          Arrays.asList(CONTAINER_ID_CLI_OPTION, containerIdStr));
      if (!allocation.getDeniedGPUs().isEmpty()) {
        // Collect the minor numbers of GPUs this container must NOT see.
        List<Integer> minorNumbers = new ArrayList<>();
        for (GpuDevice deniedGpu : allocation.getDeniedGPUs()) {
          minorNumbers.add(deniedGpu.getMinorNumber());
        }
        privilegedOperation.appendArgs(Arrays.asList(EXCLUDED_GPUS_CLI_OPTION,
            StringUtils.join(",", minorNumbers)));
      }
      privilegedOperationExecutor.executePrivilegedOperation(
          privilegedOperation, true);
    } catch (PrivilegedOperationException e) {
      // Roll back the cgroup created above before propagating the failure.
      cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.DEVICES,
          containerIdStr);
      LOG.warn("Could not update cgroup for container", e);
      throw new ResourceHandlerException(e);
    }

    List<PrivilegedOperation> ret = new ArrayList<>();
    ret.add(new PrivilegedOperation(
        PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
        PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupsHandler
            .getPathForCGroupTasks(CGroupsHandler.CGroupController.DEVICES,
                containerIdStr)));
    return ret;
  }

  return null;
}
@Test public void testAssignedGpuWillBeCleanedUpWhenStoreOpFails() throws Exception { initializeGpus(); doThrow(new IOException("Exception ...")).when(mockNMStateStore) .storeAssignedResources( any(Container.class), anyString(), anyList()); boolean exception = false; /* Start container 1, asks 3 containers */ try { gpuResourceHandler.preStart(mockContainerWithGpuRequest(1, createResourceRequest(3))); } catch (ResourceHandlerException e) { exception = true; } assertTrue("preStart should throw exception", exception); // After preStart, we still have 4 available GPU since the store op failed. verifyNumberOfAvailableGpus(4, gpuResourceHandler); }
/**
 * @return the interpreter setting name this process was launched for
 */
@Override
public String getInterpreterSettingName() {
    return interpreterSettingName;
}
@Test
void testCreateIntpProcess() throws IOException {
    DockerInterpreterLauncher launcher = new DockerInterpreterLauncher(zConf, null);

    // Minimal launch context: only the connect timeout is configured explicitly.
    Properties props = new Properties();
    props.setProperty(
        ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_CONNECT_TIMEOUT.getVarName(), "5000");
    InterpreterOption option = new InterpreterOption();
    InterpreterLaunchContext launchContext = new InterpreterLaunchContext(
        props, option, null, "user1", "intpGroupId", "groupId", "groupName", "name", 0, "host");

    InterpreterClient client = launcher.launch(launchContext);

    // The launcher must produce a docker-backed process with the expected defaults.
    assertTrue(client instanceof DockerInterpreterProcess);
    DockerInterpreterProcess dockerProcess = (DockerInterpreterProcess) client;
    assertEquals("name", dockerProcess.getInterpreterSettingName());
    assertEquals("/spark", dockerProcess.containerSparkHome);
    assertTrue(dockerProcess.uploadLocalLibToContainter);
    assertNotEquals("http://my-docker-host:2375", dockerProcess.dockerHost);
}
/**
 * Advances the Kafka metadata state machine by one step, dispatching to the
 * handler for the current state, and logs transitions.
 *
 * @param kafkaStatus current Kafka status, passed to the state handlers
 * @return the (possibly unchanged) metadata state after this step
 */
public KafkaMetadataState computeNextMetadataState(KafkaStatus kafkaStatus) {
    // Remember where we started so we can detect (and log) a transition.
    final KafkaMetadataState previousState = metadataState;
    metadataState = switch (previousState) {
        case KRaft -> onKRaft(kafkaStatus);
        case ZooKeeper -> onZooKeeper(kafkaStatus);
        case KRaftMigration -> onKRaftMigration(kafkaStatus);
        case KRaftDualWriting -> onKRaftDualWriting(kafkaStatus);
        case KRaftPostMigration -> onKRaftPostMigration(kafkaStatus);
        case PreKRaft -> onPreKRaft(kafkaStatus);
    };
    if (metadataState == previousState) {
        LOGGER.debugCr(reconciliation, "Metadata state [{}] with strimzi.io/kraft annotation [{}]", metadataState, kraftAnno);
    } else {
        LOGGER.infoCr(reconciliation, "Transitioning metadata state from [{}] to [{}] with strimzi.io/kraft annotation [{}]", previousState, metadataState, kraftAnno);
    }
    return metadataState;
}
@Test
public void testWarningInKRaft() {
    // Each of these annotation values is invalid once the cluster is already KRaft.
    for (String annotation : List.of("rollback", "disabled", "migration")) {
        Kafka kafka = new KafkaBuilder(KAFKA)
                .editMetadata()
                    .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, annotation)
                .endMetadata()
                .withNewStatus()
                    .withKafkaMetadataState(KRaft)
                .endStatus()
                .build();

        KafkaMetadataStateManager stateManager =
                new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka);
        stateManager.computeNextMetadataState(kafka.getStatus());

        // A warning condition is raised and the state stays KRaft.
        assertTrue(kafka.getStatus().getConditions().stream()
                .anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason())));
        assertEquals(kafka.getStatus().getConditions().get(0).getMessage(),
                "The strimzi.io/kraft annotation can't be set to 'migration', 'rollback' or 'disabled' because the cluster is already KRaft.");
        assertEquals(kafka.getStatus().getKafkaMetadataState(), KRaft);
    }
}
/**
 * SQL INDETERMINATE operator for BIGINT: a value is indeterminate iff it is
 * SQL NULL (the value itself is ignored).
 */
@ScalarOperator(INDETERMINATE)
@SqlType(StandardTypes.BOOLEAN)
public static boolean indeterminate(@SqlType(StandardTypes.BIGINT) long value, @IsNull boolean isNull) {
    return isNull;
}
@Test
public void testIndeterminate() {
    // NULL is indeterminate; any concrete BIGINT (small or beyond int range) is not.
    assertOperator(INDETERMINATE, "cast(null as bigint)", BOOLEAN, true);
    assertOperator(INDETERMINATE, "cast(1 as bigint)", BOOLEAN, false);
    assertOperator(INDETERMINATE, "cast(4499999999 as bigint)", BOOLEAN, false);
    assertOperator(INDETERMINATE, "4499999999", BOOLEAN, false);
}
/**
 * Appends an {@code ORDER BY} clause for the given orders.
 *
 * @param orders the order items; blank field names are skipped
 * @return this builder, for chaining
 */
public SqlBuilder orderBy(Order... orders) {
    // No orders: leave the SQL untouched.
    if (ArrayUtil.isEmpty(orders)) {
        return this;
    }

    // NOTE(review): "ORDER BY" is appended before any field is validated; if
    // every field turns out blank this leaves a dangling "ORDER BY" with no
    // columns — confirm callers never pass only blank fields.
    sql.append(" ORDER BY ");
    String field;
    boolean isFirst = true;
    for (Order order : orders) {
        field = order.getField();
        if (null != wrapper) {
            // Wrap (quote) the field name.
            field = wrapper.wrap(field);
        }
        if (StrUtil.isBlank(field)) {
            continue;
        }

        // Only prepend a comma before items after the first one.
        if (isFirst) {
            isFirst = false;
        } else {
            sql.append(StrUtil.COMMA);
        }
        sql.append(field);

        final Direction direction = order.getDirection();
        if (null != direction) {
            sql.append(StrUtil.SPACE).append(direction);
        }
    }
    return this;
}
@Test
public void orderByTest() {
    // Build a SELECT with a join, two conditions and a single ORDER BY column.
    SqlBuilder sqlBuilder = SqlBuilder.create()
            .select("id", "username")
            .from("user")
            .join("role", SqlBuilder.Join.INNER)
            .on("user.id = role.user_id")
            .where(
                    new Condition("age", ">=", 18),
                    new Condition("username", "abc", Condition.LikeType.Contains))
            .orderBy(new Order("id"));

    assertEquals("SELECT id,username FROM user INNER JOIN role ON user.id = role.user_id WHERE age >= ? AND username LIKE ? ORDER BY id", sqlBuilder.build());
}
/**
 * Configures the list class and inner serde from client configs.
 *
 * @throws ConfigException if this deserializer was already fixed via the
 *         non-default constructor
 */
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
    // The non-default constructor fixes both settings up front; reject re-configuration.
    final boolean alreadyInitialized = listClass != null || inner != null;
    if (alreadyInitialized) {
        log.error("Could not configure ListDeserializer as some parameters were already set -- listClass: {}, inner: {}", listClass, inner);
        throw new ConfigException("List deserializer was already initialized using a non-default constructor");
    }
    configureListClass(configs, isKey);
    configureInnerSerde(configs, isKey);
}
@Test
public void testListValueDeserializerShouldThrowConfigExceptionDueAlreadyInitialized() {
    props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS, ArrayList.class);
    props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS, Serdes.StringSerde.class);

    // Constructed via the non-default constructor, so configure() must be rejected.
    final ListDeserializer<Integer> deserializer =
        new ListDeserializer<>(ArrayList.class, Serdes.Integer().deserializer());

    final ConfigException exception =
        assertThrows(ConfigException.class, () -> deserializer.configure(props, true));

    assertEquals("List deserializer was already initialized using a non-default constructor",
        exception.getMessage());
}
/**
 * Returns the value at the given column from the merged result set, without
 * any type conversion.
 *
 * @param columnIndex 1-based column index
 * @throws SQLException if the underlying merge result set fails
 */
@Override
public Object getObject(final int columnIndex) throws SQLException {
    return mergeResultSet.getValue(columnIndex, Object.class);
}
@Test
void assertGetObjectWithURL() throws SQLException {
    // The typed getObject(int, Class) overload must pass the type through to the merge result.
    URL expectedUrl = mock(URL.class);
    when(mergeResultSet.getValue(1, URL.class)).thenReturn(expectedUrl);

    assertThat(shardingSphereResultSet.getObject(1, URL.class), is(expectedUrl));
}
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new 
ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. 
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. 
// This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. 
if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. 
if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { 
jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. 
if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
/**
 * Verifies that a streaming pipeline run through {@code TestDataflowRunner} surfaces a
 * {@code RuntimeException} when the service reports a JOB_MESSAGE_ERROR and the job is
 * subsequently cancelled.
 */
@Test
public void testStreamingPipelineFailsIfException() throws Exception {
  options.setStreaming(true);
  Pipeline pipeline = TestPipeline.create(options);
  PCollection<Integer> pc = pipeline.apply(Create.of(1, 2, 3));
  PAssert.that(pc).containsInAnyOrder(1, 2, 3);

  // Mock a job that looks healthy (RUNNING) until waitUntilFinish is called.
  DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
  when(mockJob.getState()).thenReturn(State.RUNNING);
  when(mockJob.getProjectId()).thenReturn("test-project");
  when(mockJob.getJobId()).thenReturn("test-job");
  // On waitUntilFinish: deliver one error-level job message to the handler, then
  // report the job as CANCELLED. The handler argument is the second parameter.
  when(mockJob.waitUntilFinish(any(Duration.class), any(JobMessagesHandler.class)))
      .thenAnswer(
          invocation -> {
            JobMessage message = new JobMessage();
            message.setMessageText("FooException");
            message.setTime(TimeUtil.toCloudTime(Instant.now()));
            message.setMessageImportance("JOB_MESSAGE_ERROR");
            ((JobMessagesHandler) invocation.getArguments()[1]).process(Arrays.asList(message));
            return State.CANCELLED;
          });

  DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
  when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);

  // Metrics report a failed (non-successful, tentative) PAssert outcome.
  when(mockClient.getJobMetrics(anyString()))
      .thenReturn(generateMockMetricResponse(false /* success */, true /* tentative */));
  TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
  // The error message delivered above must be converted into a RuntimeException.
  expectedException.expect(RuntimeException.class);
  runner.run(pipeline, mockRunner);
}
/**
 * Sets the enable-metrics-init flag carried by this builder.
 *
 * @param enableMetricsInit the flag value to store; boxed, so {@code null} is accepted
 * @return this builder, for fluent chaining
 */
public MetricsBuilder enableMetricsInit(Boolean enableMetricsInit) {
    this.enableMetricsInit = enableMetricsInit;
    return getThis();
}
@Test
void enableMetricsInit() {
    // Configure the flag fluently and verify it survives through build().
    Assertions.assertTrue(
            MetricsBuilder.newBuilder().enableMetricsInit(true).build().getEnableMetricsInit());
}
/**
 * Increments or decrements the like counter of the given board.
 *
 * @param boardId        id of the board to update
 * @param isIncreaseLike true to add a like, false to remove one
 */
public void patchLike(final Long boardId, final boolean isIncreaseLike) {
    // Acquire a pessimistic row lock first so concurrent like updates cannot
    // clobber each other's counter changes.
    final Board lockedBoard = findByIdUsingPessimisticLock(boardId);
    lockedBoard.patchLike(isIncreaseLike);
}
// Liking a board that does not exist must fail with BoardNotFoundException.
// (Method name in Korean: "cannot process a like if the board does not exist".)
@Test
void 게시글이_없다면_좋아요_처리를_못한다() {
    // when & then
    assertThatThrownBy(() -> boardService.patchLike(1L, true))
        .isInstanceOf(BoardNotFoundException.class);
}
/**
 * Merges application reports collected from multiple sub-clusters into a single
 * de-duplicated collection, folding each application's Unmanaged AM (UAM)
 * reports into its AM report when present.
 *
 * <p>A report with a non-null AM HTTP address is treated as the AM; all other
 * reports for the same app id are treated as UAMs.
 *
 * @param appsInfo reports to merge, in arbitrary order
 * @param returnPartialResult if true, keep UAM-only entries even when no
 *        matching AM report was seen
 * @return the merged applications, one entry per app id
 */
public static AppsInfo mergeAppsInfo(ArrayList<AppInfo> appsInfo,
    boolean returnPartialResult) {
  AppsInfo allApps = new AppsInfo();

  // appId -> merged AM report.
  Map<String, AppInfo> federationAM = new HashMap<>();
  // appId -> running sum of UAM reports seen before (or without) their AM.
  Map<String, AppInfo> federationUAMSum = new HashMap<>();
  for (AppInfo a : appsInfo) {
    // Check if this AppInfo is an AM
    if (a.getAMHostHttpAddress() != null) {
      // Insert in the list of AM
      federationAM.put(a.getAppId(), a);
      // Check if there are any UAM found before
      if (federationUAMSum.containsKey(a.getAppId())) {
        // Merge the current AM with the found UAM
        mergeAMWithUAM(a, federationUAMSum.get(a.getAppId()));
        // Remove the sum of the UAMs
        federationUAMSum.remove(a.getAppId());
      }
      // This AppInfo is an UAM
    } else {
      if (federationAM.containsKey(a.getAppId())) {
        // Merge the current UAM with its own AM
        mergeAMWithUAM(federationAM.get(a.getAppId()), a);
      } else if (federationUAMSum.containsKey(a.getAppId())) {
        // Merge the current UAM with its own UAM and update the list of UAM
        federationUAMSum.put(a.getAppId(),
            mergeUAMWithUAM(federationUAMSum.get(a.getAppId()), a));
      } else {
        // Insert in the list of UAM
        federationUAMSum.put(a.getAppId(), a);
      }
    }
  }

  // Check the remaining UAMs are depending or not from federation.
  // Federation-internal UAMs (names with the UAM/partial-report prefixes) are
  // dropped unless the caller asked for partial results.
  for (AppInfo a : federationUAMSum.values()) {
    if (returnPartialResult || (a.getName() != null
        && !(a.getName().startsWith(UnmanagedApplicationManager.APP_NAME)
            || a.getName().startsWith(PARTIAL_REPORT)))) {
      federationAM.put(a.getAppId(), a);
    }
  }

  allApps.addAll(new ArrayList<>(federationAM.values()));
  return allApps;
}
@Test public void testMerge4DifferentApps() { AppsInfo apps = new AppsInfo(); int value = 1000; AppInfo app1 = new AppInfo(); app1.setAppId(APPID1.toString()); app1.setAMHostHttpAddress("http://i_am_the_AM1:1234"); app1.setState(YarnApplicationState.FINISHED); app1.setNumAMContainerPreempted(value); apps.add(app1); AppInfo app2 = new AppInfo(); app2.setAppId(APPID2.toString()); app2.setAMHostHttpAddress("http://i_am_the_AM2:1234"); app2.setState(YarnApplicationState.ACCEPTED); app2.setAllocatedVCores(2 * value); apps.add(app2); AppInfo app3 = new AppInfo(); app3.setAppId(APPID3.toString()); app3.setAMHostHttpAddress("http://i_am_the_AM3:1234"); app3.setState(YarnApplicationState.RUNNING); app3.setReservedMB(3 * value); apps.add(app3); AppInfo app4 = new AppInfo(); app4.setAppId(APPID4.toString()); app4.setAMHostHttpAddress("http://i_am_the_AM4:1234"); app4.setState(YarnApplicationState.NEW); app4.setAllocatedMB(4 * value); apps.add(app4); AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false); Assert.assertNotNull(result); Assert.assertEquals(4, result.getApps().size()); List<String> appIds = new ArrayList<String>(); AppInfo appInfo1 = null, appInfo2 = null, appInfo3 = null, appInfo4 = null; for (AppInfo app : result.getApps()) { appIds.add(app.getAppId()); if (app.getAppId().equals(APPID1.toString())) { appInfo1 = app; } if (app.getAppId().equals(APPID2.toString())) { appInfo2 = app; } if (app.getAppId().equals(APPID3.toString())) { appInfo3 = app; } if (app.getAppId().equals(APPID4.toString())) { appInfo4 = app; } } Assert.assertTrue(appIds.contains(APPID1.toString())); Assert.assertTrue(appIds.contains(APPID2.toString())); Assert.assertTrue(appIds.contains(APPID3.toString())); Assert.assertTrue(appIds.contains(APPID4.toString())); // Check preservations APP1 Assert.assertEquals(app1.getState(), appInfo1.getState()); Assert.assertEquals(app1.getNumAMContainerPreempted(), appInfo1.getNumAMContainerPreempted()); // Check preservations APP2 
Assert.assertEquals(app2.getState(), appInfo2.getState()); Assert.assertEquals(app3.getAllocatedVCores(), appInfo3.getAllocatedVCores()); // Check preservations APP3 Assert.assertEquals(app3.getState(), appInfo3.getState()); Assert.assertEquals(app3.getReservedMB(), appInfo3.getReservedMB()); // Check preservations APP3 Assert.assertEquals(app4.getState(), appInfo4.getState()); Assert.assertEquals(app3.getAllocatedMB(), appInfo3.getAllocatedMB()); }
/**
 * Deletes the given consumer groups, fanning the request out to the coordinator
 * partition owning each group and combining the per-partition results.
 *
 * <p>Null group ids are answered immediately with INVALID_GROUP_ID; if the
 * coordinator is not active, every group gets COORDINATOR_NOT_AVAILABLE.
 *
 * @param context        the request context
 * @param groupIds       group ids to delete; may contain null entries
 * @param bufferSupplier buffer supplier (unused here, part of the interface)
 * @return a future with one result per requested group
 */
@Override
public CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> deleteGroups(
    RequestContext context,
    List<String> groupIds,
    BufferSupplier bufferSupplier
) {
    // Fail fast when this coordinator instance has not started / is shut down.
    if (!isActive.get()) {
        return CompletableFuture.completedFuture(DeleteGroupsRequest.getErrorResultCollection(
            groupIds,
            Errors.COORDINATOR_NOT_AVAILABLE
        ));
    }

    final List<CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection>> futures =
        new ArrayList<>(groupIds.size());
    // Bucket the groups by the coordinator topic-partition that owns them so a
    // single write operation per partition can delete all of its groups.
    final Map<TopicPartition, List<String>> groupsByTopicPartition = new HashMap<>();
    groupIds.forEach(groupId -> {
        // For backwards compatibility, we support DeleteGroups for the empty group id.
        if (groupId == null) {
            futures.add(CompletableFuture.completedFuture(DeleteGroupsRequest.getErrorResultCollection(
                Collections.singletonList(null),
                Errors.INVALID_GROUP_ID
            )));
        } else {
            final TopicPartition topicPartition = topicPartitionFor(groupId);
            groupsByTopicPartition
                .computeIfAbsent(topicPartition, __ -> new ArrayList<>())
                .add(groupId);
        }
    });

    groupsByTopicPartition.forEach((topicPartition, groupList) -> {
        CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> future =
            runtime.scheduleWriteOperation(
                "delete-groups",
                topicPartition,
                Duration.ofMillis(config.offsetCommitTimeoutMs()),
                coordinator -> coordinator.deleteGroups(context, groupList)
            ).exceptionally(exception -> handleOperationException(
                "delete-groups",
                groupList,
                exception,
                // On failure, map the whole partition's group list to the error.
                (error, __) -> DeleteGroupsRequest.getErrorResultCollection(groupList, error)
            ));

        futures.add(future);
    });

    return FutureUtils.combineFutures(futures, DeleteGroupsResponseData.DeletableGroupResultCollection::new,
        // We don't use res.addAll(future.join()) because DeletableGroupResultCollection is an ImplicitLinkedHashMultiCollection,
        // which has requirements for adding elements (see ImplicitLinkedHashCollection.java#add).
        (accumulator, newResults) -> newResults.forEach(result -> accumulator.add(result.duplicate())));
}
/**
 * A service that was never started must answer DeleteGroups with
 * COORDINATOR_NOT_AVAILABLE for every requested group.
 */
@Test
public void testDeleteGroupsWhenNotStarted() throws ExecutionException, InterruptedException {
    CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> mockedRuntime = mockRuntime();
    GroupCoordinatorService notStartedService = new GroupCoordinatorService(
        new LogContext(),
        createConfig(),
        mockedRuntime,
        mock(GroupCoordinatorMetrics.class),
        createConfigManager()
    );

    // Expected: the single group comes back flagged COORDINATOR_NOT_AVAILABLE.
    DeleteGroupsResponseData.DeletableGroupResultCollection expected =
        new DeleteGroupsResponseData.DeletableGroupResultCollection(
            Collections.singletonList(
                new DeleteGroupsResponseData.DeletableGroupResult()
                    .setGroupId("foo")
                    .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
            ).iterator()
        );

    CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> response =
        notStartedService.deleteGroups(
            requestContext(ApiKeys.DELETE_GROUPS),
            Collections.singletonList("foo"),
            BufferSupplier.NO_CACHING
        );

    assertEquals(expected, response.get());
}
/**
 * Returns true when {@code val} is an element of {@code array}.
 *
 * @param array the list to search; a null list yields false
 * @param val   the candidate element; may be null (delegates to List.contains)
 * @return whether the list contains the value
 */
@Udf
public <T> boolean contains(
    @UdfParameter final List<T> array,
    @UdfParameter final T val
) {
  // Guard clause: a missing list can contain nothing.
  if (array == null) {
    return false;
  }
  return array.contains(val);
}
@Test
public void shouldReturnFalseOnEmptyList() {
  // Whatever the probe's type — booleans, null, numbers, strings — nothing is
  // ever contained in the empty list.
  final Object[] probes = {true, false, null, 1.0, 100, "abc", ""};
  for (final Object probe : probes) {
    assertFalse(udf.contains(Collections.emptyList(), probe));
  }
}
/**
 * Not yet implemented.
 *
 * @throws UnsupportedOperationException always, until the backing store
 *         for historical loss-measurement stats is implemented
 */
@Override
public Collection<LossMeasurementStatCurrent> getLmHistoricalStats(
        MdId mdName, MaIdShort maName, MepId mepId, SoamId lmId) {
    throw new UnsupportedOperationException("Not yet implemented");
}
// Documents the current stub behavior: the call must throw until implemented.
@Test
public void testGetLmhistoricalStats() {
    //TODO: Implement underlying method
    try {
        soamManager.getLmHistoricalStats(MDNAME1, MANAME1, MEPID1, LMID101);
        fail("Expecting UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
        // expected: the manager deliberately throws for this unimplemented API
    }
}
/**
 * Coerces the left and right operands of a comparison to compatible types.
 *
 * <p>Branch order matters: identical/unification types short-circuit, then an
 * incompatible pair fails, then int/long-vs-double widening, then the cascade
 * of right-hand coercions (literal numbers, strings, narrowing, date/time
 * parsing, maps, booleans), and finally a possible left-hand Character-to-String
 * coercion.
 *
 * @return the (possibly) coerced pair; {@code rightAsStaticField} is set when
 *         the right side was turned into a parsed date/time constant
 * @throws CoercedExpressionException when the two types cannot be compared
 */
public CoercedExpressionResult coerce() {

    final Class<?> leftClass = left.getRawClass();
    final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
    final Class<?> rightClass = right.getRawClass();
    final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);

    boolean sameClass = leftClass == rightClass;
    boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;

    // Nothing to do when types already match or unification handles them.
    if (sameClass || isUnificationExpression) {
        return new CoercedExpressionResult(left, right);
    }

    if (!canCoerce()) {
        throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
    }

    // int/long compared with double: widen the LEFT side to double.
    if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
        CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
        return new CoercedExpressionResult(
                new TypedExpression(castExpression, double.class, left.getType()),
                right,
                false);
    }

    final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
    final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);

    boolean rightAsStaticField = false;
    final Expression rightExpression = right.getExpression();

    final TypedExpression coercedRight;
    if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
        // Rewrite a numeric literal on the right to the left side's numeric type.
        final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
        coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
        coercedRight.setType( leftClass );
    } else if (shouldCoerceBToString(left, right)) {
        coercedRight = coerceToString(right);
    } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
        // A number literal may be safely narrowed to the left side's type.
        coercedRight = castToClass(leftClass);
    } else if (leftClass == long.class && rightClass == int.class) {
        coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
    } else if (leftClass == Date.class && rightClass == String.class) {
        // Date/time strings are parsed once and held as static fields.
        coercedRight = coerceToDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDate.class && rightClass == String.class) {
        coercedRight = coerceToLocalDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDateTime.class && rightClass == String.class) {
        coercedRight = coerceToLocalDateTime(right);
        rightAsStaticField = true;
    } else if (shouldCoerceBToMap()) {
        coercedRight = castToClass(toNonPrimitiveType(leftClass));
    } else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
        coercedRight = coerceBoolean(right);
    } else {
        // No applicable coercion: pass the right side through unchanged.
        coercedRight = right;
    }

    // The only left-side coercion besides the int/long-vs-double case above:
    // a Character compared against a String-like right side becomes a String.
    final TypedExpression coercedLeft;
    if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
        coercedLeft = coerceToString(left);
    } else {
        coercedLeft = left;
    }

    return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}
@Test
public void doNotCastNullLiteral() {
    // A "null" literal compared against Boolean must pass through coercion untouched.
    final TypedExpression booleanLeft =
            expr(THIS_PLACEHOLDER + ".isApproved()", java.lang.Boolean.class);
    final TypedExpression nullRight = expr("null", MethodUtils.NullType.class);

    final CoercedExpression.CoercedExpressionResult result =
            new CoercedExpression(booleanLeft, nullRight, false).coerce();

    assertThat(result.getCoercedRight()).isEqualTo(expr("null", MethodUtils.NullType.class));
}
/**
 * Refreshes the proxy-user (superuser group) configuration from a freshly
 * loaded server-side {@code Configuration}, delegating to the overload that
 * accepts an explicit configuration.
 */
public static void refreshSuperUserGroupsConfiguration() {
    //load server side configuration;
    refreshSuperUserGroupsConfiguration(new Configuration());
}
/**
 * Verifies that a custom ImpersonationProvider configured via
 * HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS overrides the default rules.
 *
 * <p>NOTE(review): TestDummyImpersonationProvider appears to authorize based on
 * the real user's group only, ignoring the caller IP — which is why both the
 * "good IP" and "bad IP" checks expect the same outcome below. Confirm against
 * the provider's implementation.
 */
@Test
public void testProxyUsersWithProviderOverride() throws Exception {
    Configuration conf = new Configuration();
    conf.set(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS,
        "org.apache.hadoop.security.authorize.TestProxyUsers$TestDummyImpersonationProvider");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

    // First try proxying a group that's allowed
    UserGroupInformation realUserUgi = UserGroupInformation
        .createUserForTesting(REAL_USER_NAME, SUDO_GROUP_NAMES);
    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
        PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

    // From good IP
    assertAuthorized(proxyUserUgi, "1.2.3.4");
    // From bad IP
    assertAuthorized(proxyUserUgi, "1.2.3.5");

    // Now try proxying a group that's not allowed
    realUserUgi = UserGroupInformation
        .createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
    proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
        PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

    // From good IP
    assertNotAuthorized(proxyUserUgi, "1.2.3.4");
    // From bad IP
    assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuilder buf = new StringBuilder(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case START_STATE: handleStartState(c, tokenList, buf); break; case DEFAULT_VAL_STATE: handleDefaultValueState(c, tokenList, buf); default: } } // EOS switch (state) { case LITERAL_STATE: addLiteralToken(tokenList, buf); break; case DEFAULT_VAL_STATE: // trailing colon. see also LOGBACK-1140 buf.append(CoreConstants.COLON_CHAR); addLiteralToken(tokenList, buf); break; case START_STATE: // trailing $. see also LOGBACK-1149 buf.append(CoreConstants.DOLLAR); addLiteralToken(tokenList, buf); break; } return tokenList; }
// NOTE(review): the method name has a typo ("simleVariable" -> "simpleVariable");
// left as-is here to avoid renaming a public test identifier in this change.
@Test
public void simleVariable() throws ScanException {
    String input = "${abc}";

    Tokenizer tokenizer = new Tokenizer(input);
    List<Token> tokenList = tokenizer.tokenize();

    // Expected: start marker ("${"), the literal variable name, closing brace.
    witnessList.add(Token.START_TOKEN);
    witnessList.add(new Token(Token.Type.LITERAL, "abc"));
    witnessList.add(Token.CURLY_RIGHT_TOKEN);
    assertEquals(witnessList, tokenList);
}
/**
 * Decrypts text of the form "{algorithm}payload" using the cipher registered
 * under the (lower-cased) algorithm name. Text that does not match the
 * encrypted pattern, or names an unknown cipher, is returned unchanged.
 *
 * @param encryptedText the possibly-encrypted text
 * @return the decrypted value, or the input itself when no cipher applies
 */
public String decrypt(String encryptedText) {
    Matcher matcher = ENCRYPTED_PATTERN.matcher(encryptedText);
    // Guard clause: plain (non-encrypted) text passes through untouched.
    if (!matcher.matches()) {
        return encryptedText;
    }
    Cipher cipher = ciphers.get(matcher.group(1).toLowerCase(Locale.ENGLISH));
    return cipher != null ? cipher.decrypt(matcher.group(2)) : encryptedText;
}
@Test
public void decrypt() {
    final Encryption underTest = new Encryption(null);
    // "{b64}" selects the base64 cipher; "Zm9v" is the base64 encoding of "foo".
    assertThat(underTest.decrypt("{b64}Zm9v")).isEqualTo("foo");
}
/**
 * Returns a point-in-time copy of the instance metadata map. The returned map
 * is detached from the live one, so later registrations or removals do not
 * affect callers holding the snapshot.
 *
 * @return a fresh map holding the current service -> instance-metadata entries
 */
public Map<Service, ConcurrentMap<String, InstanceMetadata>> getInstanceMetadataSnapshot() {
    // The copy constructor sizes and fills the new map in one step.
    return new ConcurrentHashMap<>(instanceMetadataMap);
}
@Test
void testGetInstanceMetadataSnapshot() {
    final Map<Service, ConcurrentMap<String, InstanceMetadata>> snapshot =
            namingMetadataManager.getInstanceMetadataSnapshot();
    // The fixture registers metadata for exactly one service.
    assertEquals(1, snapshot.size());
}
/**
 * Builds a facade over the store with the requested name and type. Global
 * stores take precedence; otherwise the lookup is wrapped over all local
 * store providers.
 *
 * @param storeQueryParameters name and queryable type of the store to fetch
 * @return a store facade of type {@code T}
 */
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
    final String name = storeQueryParameters.storeName();
    final QueryableStoreType<T> type = storeQueryParameters.queryableStoreType();

    // A matching global store wins over any local stores of the same name.
    if (!globalStoreProvider.stores(name, type).isEmpty()) {
        return type.create(globalStoreProvider, name);
    }

    final WrappingStoreProvider wrappingProvider =
        new WrappingStoreProvider(storeProviders.values(), storeQueryParameters);
    return type.create(wrappingProvider, name);
}
// Querying a store name that was never registered must fail fast with
// InvalidStateStoreException rather than returning an empty/broken facade.
@Test
public void shouldThrowExceptionIfKVStoreDoesntExist() {
    assertThrows(
        InvalidStateStoreException.class,
        () -> storeProvider.getStore(
            StoreQueryParameters.fromNameAndType("not-a-store", QueryableStoreTypes.keyValueStore())).get("1"));
}
/**
 * Builds the per-config factory for a scalar UDF instance.
 *
 * <p>The returned function, when given a {@code KsqlConfig}: instantiates the
 * UDF class, runs its {@code Configurable#configure} hook inside the extension
 * security sandbox, wraps it as a {@code PluggableUdf}, and — when metrics are
 * enabled — decorates it with a metric-recording wrapper.
 *
 * @param method                   the annotated UDF method
 * @param udfDescriptionAnnotation the class-level UDF description
 * @param functionName             the registered function name (for config lookup)
 * @param invoker                  the reflective invoker for {@code method}
 * @param sensorName               metrics sensor to record invocations against
 * @return a factory producing a configured {@code Kudf} per config
 */
private Function<KsqlConfig, Kudf> getUdfFactory(
    final Method method,
    final UdfDescription udfDescriptionAnnotation,
    final String functionName,
    final FunctionInvoker invoker,
    final String sensorName
) {
  return ksqlConfig -> {
    final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
        method.getDeclaringClass(), udfDescriptionAnnotation.name());
    if (actualUdf instanceof Configurable) {
      // configure() runs user code: enter the UDF sandbox first, and always
      // leave it again even if configure() throws.
      ExtensionSecurityManager.INSTANCE.pushInUdf();
      try {
        ((Configurable) actualUdf)
            .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
      } finally {
        ExtensionSecurityManager.INSTANCE.popOutUdf();
      }
    }
    final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
    // With metrics enabled, wrap the UDF so each call is timed on the sensor.
    return metrics.<Kudf>map(m -> new UdfMetricProducer(
        m.getSensor(sensorName),
        theUdf,
        Time.SYSTEM
    )).orElse(theUdf);
  };
}
@Test public void shouldLoadFunctionWithMapReturnType() { // Given: final UdfFactory toMap = FUNC_REG.getUdfFactory(FunctionName.of("tomap")); // When: final List<SqlArgument> args = Collections.singletonList(SqlArgument.of(SqlTypes.STRING)); final KsqlScalarFunction function = toMap.getFunction(args); // Then: assertThat( function.getReturnType(args), equalTo(SqlTypes.map(SqlTypes.STRING, SqlTypes.STRING)) ); }
/**
 * Creates a distribution metric tracked per bundle-processing thread.
 *
 * @param shortId the short id under which intermediate monitoring data is reported
 * @param name    the metric name
 * @return a new {@code BundleProcessingThreadDistribution}
 */
public static BundleDistribution bundleProcessingThreadDistribution(
    String shortId, MetricName name) {
  return new BundleProcessingThreadDistribution(shortId, name);
}
/**
 * Exercises the bundle distribution's reporting lifecycle twice (before and
 * after reset): a report is emitted only when there are new updates, updates
 * accumulate (7 then {sum=5,count=2,min=2,max=3} folds to {12,3,2,7}), and a
 * reset returns the metric to a reusable pristine state.
 */
@Test
public void testAccurateBundleDistributionWithMutations() throws Exception {
  Map<String, ByteString> report = new HashMap<>();
  BundleDistribution bundleDistribution =
      Metrics.bundleProcessingThreadDistribution(TEST_ID, TEST_NAME);

  // First update: a single value 7 is reported as a singleton distribution.
  bundleDistribution.update(7);
  bundleDistribution.updateIntermediateMonitoringData(report);
  assertEquals(
      report,
      Collections.singletonMap(
          TEST_ID,
          MonitoringInfoEncodings.encodeInt64Distribution(DistributionData.singleton(7))));
  report.clear();

  // No updates since the last report: nothing should be emitted.
  bundleDistribution.updateIntermediateMonitoringData(report);
  assertEquals(report, Collections.emptyMap());

  // Fold in sum=5, count=2, min=2, max=3: combined with the earlier 7 this
  // yields sum=12, count=3, min=2, max=7.
  bundleDistribution.update(5, 2, 2, 3);
  bundleDistribution.updateIntermediateMonitoringData(report);
  assertEquals(
      report,
      Collections.singletonMap(
          TEST_ID,
          MonitoringInfoEncodings.encodeInt64Distribution(DistributionData.create(12, 3, 2, 7))));
  report.clear();

  // Test re-use of the metrics after reset
  bundleDistribution.reset();
  bundleDistribution.update(7);
  bundleDistribution.updateIntermediateMonitoringData(report);
  assertEquals(
      report,
      Collections.singletonMap(
          TEST_ID,
          MonitoringInfoEncodings.encodeInt64Distribution(DistributionData.singleton(7))));
  report.clear();

  bundleDistribution.updateIntermediateMonitoringData(report);
  assertEquals(report, Collections.emptyMap());

  bundleDistribution.update(5, 2, 2, 3);
  bundleDistribution.updateIntermediateMonitoringData(report);
  assertEquals(
      report,
      Collections.singletonMap(
          TEST_ID,
          MonitoringInfoEncodings.encodeInt64Distribution(DistributionData.create(12, 3, 2, 7))));
}
/**
 * Test-only constructor: lets tests create isolated manager instances.
 * NOTE(review): production code presumably obtains the singleton through a
 * static accessor on this class — confirm against the enclosing class.
 */
@VisibleForTesting
@InterfaceAudience.Private
ShutdownHookManager() {
}
@Test
public void shutdownHookManager() {
  assertNotNull("No ShutdownHookManager", mgr);
  assertEquals(0, mgr.getShutdownHooksInOrder().size());
  Hook hook1 = new Hook("hook1", 0, false);
  Hook hook2 = new Hook("hook2", 0, false);
  Hook hook3 = new Hook("hook3", 1000, false);
  Hook hook4 = new Hook("hook4", 25000, true);
  Hook hook5 = new Hook("hook5",
      (SERVICE_SHUTDOWN_TIMEOUT_DEFAULT + 1) * 1000, true);
  // Basic add/remove lifecycle for a single hook.
  mgr.addShutdownHook(hook1, 0);
  assertTrue(mgr.hasShutdownHook(hook1));
  assertEquals(1, mgr.getShutdownHooksInOrder().size());
  assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0).getHook());
  assertTrue(mgr.removeShutdownHook(hook1));
  assertFalse(mgr.hasShutdownHook(hook1));
  // Removing an already-removed hook must report failure, not throw.
  assertFalse(mgr.removeShutdownHook(hook1));
  mgr.addShutdownHook(hook1, 0);
  assertTrue(mgr.hasShutdownHook(hook1));
  assertEquals(1, mgr.getShutdownHooksInOrder().size());
  // A hook added without an explicit timeout gets the service default.
  assertEquals(SERVICE_SHUTDOWN_TIMEOUT_DEFAULT,
      mgr.getShutdownHooksInOrder().get(0).getTimeout());
  // Higher-priority hooks are ordered first in the shutdown sequence.
  mgr.addShutdownHook(hook2, 1);
  assertTrue(mgr.hasShutdownHook(hook1));
  assertTrue(mgr.hasShutdownHook(hook2));
  assertEquals(2, mgr.getShutdownHooksInOrder().size());
  assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0).getHook());
  assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1).getHook());

  // Test hook finish without timeout
  mgr.addShutdownHook(hook3, 2, 4, TimeUnit.SECONDS);
  assertTrue(mgr.hasShutdownHook(hook3));
  assertEquals(hook3, mgr.getShutdownHooksInOrder().get(0).getHook());
  assertEquals(4, mgr.getShutdownHooksInOrder().get(0).getTimeout());

  // Test hook finish with timeout; highest priority
  int hook4timeout = 2;
  mgr.addShutdownHook(hook4, 3, hook4timeout, TimeUnit.SECONDS);
  assertTrue(mgr.hasShutdownHook(hook4));
  assertEquals(hook4, mgr.getShutdownHooksInOrder().get(0).getHook());
  assertEquals(2, mgr.getShutdownHooksInOrder().get(0).getTimeout());

  // a default timeout hook and verify it gets the default timeout
  mgr.addShutdownHook(hook5, 5);
  ShutdownHookManager.HookEntry hookEntry5 = mgr.getShutdownHooksInOrder()
      .get(0);
  assertEquals(hook5, hookEntry5.getHook());
  assertEquals("default timeout not used",
      ShutdownHookManager.getShutdownTimeout(new Configuration()),
      hookEntry5.getTimeout());
  assertEquals("hook priority", 5, hookEntry5.getPriority());
  // remove this to avoid a longer sleep in the test run
  assertTrue("failed to remove " + hook5,
      mgr.removeShutdownHook(hook5));

  // now execute the hook shutdown sequence
  INVOCATION_COUNT.set(0);
  LOG.info("invoking executeShutdown()");
  int timeouts = mgr.executeShutdown();
  LOG.info("Shutdown completed");
  // Only hook4 (sleeps 25s with a 2s timeout) is expected to time out.
  assertEquals("Number of timed out hooks", 1, timeouts);

  List<ShutdownHookManager.HookEntry> hooks = mgr.getShutdownHooksInOrder();

  // analyze the hooks
  for (ShutdownHookManager.HookEntry entry : hooks) {
    Hook hook = (Hook) entry.getHook();
    assertTrue("Was not invoked " + hook, hook.invoked);
    // did any hook raise an exception?
    hook.maybeThrowAssertion();
  }

  // check the state of some of the invoked hooks
  // hook4 was invoked first, but it timed out.
  assertEquals("Expected to be invoked first " + hook4,
      1, hook4.invokedOrder);
  assertFalse("Expected to time out " + hook4, hook4.completed);

  // hook1 completed, but in order after the others, so its start time
  // is the longest.
  assertTrue("Expected to complete " + hook1, hook1.completed);
  long invocationInterval = hook1.startTime - hook4.startTime;
  // hook4's timeout must have elapsed before later hooks started,
  // yet hook4's full sleep must NOT have blocked them.
  assertTrue("invocation difference too short " + invocationInterval,
      invocationInterval >= hook4timeout * 1000);
  assertTrue("sleeping hook4 blocked other threads for " + invocationInterval,
      invocationInterval < hook4.sleepTime);

  // finally, clear the hooks
  mgr.clearShutdownHooks();
  // and verify that the hooks are empty
  assertFalse(mgr.hasShutdownHook(hook1));
  assertEquals("shutdown hook list is not empty", 0,
      mgr.getShutdownHooksInOrder().size());
}
/**
 * Streaming column access is not supported by this merged result.
 *
 * @throws SQLFeatureNotSupportedException always
 */
@Override
public final InputStream getInputStream(final int columnIndex, final String type) throws SQLException {
    final String message = String.format("Get input stream from `%s`", type);
    throw new SQLFeatureNotSupportedException(message);
}
@Test
void assertGetInputStream() {
    // In-memory merged results never support streaming column access.
    assertThrows(SQLFeatureNotSupportedException.class, () -> memoryMergedResult.getInputStream(1, "ascii"));
}
/**
 * Maps the parsed command-line flags to an acknowledge type.
 * REJECT and RELEASE are explicit flags; ACCEPT is the default.
 */
AcknowledgeType acknowledgeType() {
    if (options.has(rejectOpt)) {
        return AcknowledgeType.REJECT;
    }
    return options.has(releaseOpt) ? AcknowledgeType.RELEASE : AcknowledgeType.ACCEPT;
}
@Test
public void testReleaseOption() throws IOException {
    // Parse a command line carrying the --release acknowledgement flag.
    String[] argv = {"--bootstrap-server", "localhost:9092", "--topic", "test", "--release"};
    ConsoleShareConsumerOptions options = new ConsoleShareConsumerOptions(argv);
    // --release must map to the RELEASE acknowledge type.
    assertEquals(AcknowledgeType.RELEASE, options.acknowledgeType());
}
/**
 * Returns the conflict-resolution actions applicable to the given transfer type.
 * Sync offers direction choices, copy offers overwrite/compare, and every other
 * type (download/upload/move) offers the full resume/rename/skip set.
 */
public static List<TransferAction> forTransfer(final Transfer.Type t) {
    final List<TransferAction> actions;
    switch(t) {
        case sync:
            actions = Arrays.asList(
                TransferAction.download,
                TransferAction.upload,
                TransferAction.mirror);
            break;
        case copy:
            actions = Arrays.asList(
                TransferAction.overwrite,
                TransferAction.comparison);
            break;
        default:
            actions = Arrays.asList(
                TransferAction.resume,
                TransferAction.overwrite,
                TransferAction.rename,
                TransferAction.renameexisting,
                TransferAction.skip,
                TransferAction.comparison);
            break;
    }
    return actions;
}
@Test
public void testActionsForCopy() {
    final List<TransferAction> actions = TransferAction.forTransfer(Transfer.Type.copy);
    // Copy transfers offer exactly two actions: overwrite and comparison.
    assertEquals(2, actions.size());
    assertTrue(actions.contains(TransferAction.comparison));
    assertTrue(actions.contains(TransferAction.overwrite));
}
/**
 * Requests an activation letter for the current account and maps backend
 * outcomes to app responses. Emits a distinct remote-log event per outcome.
 */
@Override
public AppResponse process(Flow flow, AppRequest request) {
    // Letters require a BSN-linked account; log (hidden) and refuse otherwise.
    if (!appSession.getWithBsn()) {
        digidClient.remoteLog("1487", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), "hidden", true));
        return new NokResponse("no_bsn_on_account");
    }
    // Running inside the re-apply flow means this is a replacement-letter request.
    boolean reRequestLetter = flow.getName().equals(ReApplyActivateActivationCode.NAME);
    if(reRequestLetter){
        digidClient.remoteLog("914", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
            lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
            lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
    }
    Map<String, Object> result = digidClient.createLetter(appSession.getAccountId(), appSession.getActivationMethod(), reRequestLetter);
    // Backend error codes map to specific log events; "too_often" also gets a
    // dedicated response type carrying the retry payload.
    if (result.get(ERROR) != null){
        if(result.get(ERROR).equals("too_often")){
            digidClient.remoteLog("906", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
                lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
                lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
            return new NokTooOftenResponse((Map<String, Object>) result.get(PAYLOAD), (String) result.get(ERROR));
        } else if(result.get(ERROR).equals("too_soon")){
            digidClient.remoteLog("758", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId()));
        } else if(result.get(ERROR).equals("too_many_letter_requests")){
            digidClient.remoteLog("1554", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
                lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
                lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
        }
        // All error outcomes ultimately return a NOK carrying the backend error code.
        return new NokResponse((String) result.get(ERROR));
    }
    // Success: remember the registration id for later steps in the flow, then log.
    appSession.setRegistrationId(((Integer) result.get(lowerUnderscore(REGISTRATION_ID))).longValue());
    digidClient.remoteLog("904", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
        lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
        lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
    return new OkResponse();
}
@Test
void processTooOften() {
    // Backend reports "too_often" for a re-requested activation letter.
    when(digidClientMock.createLetter(mockedAppSession.getAccountId(), mockedAppSession.getActivationMethod(), true)).thenReturn(tooOftenCreateLetterResponse);
    when(mockedFlow.getName()).thenReturn(ReApplyActivateActivationCode.NAME);

    NokTooOftenResponse appResponse = (NokTooOftenResponse) letterSent.process(mockedFlow, mockedAbstractAppRequest);

    // Event 906 must be logged exactly once with the authenticator's app details.
    verify(digidClientMock, times(1)).remoteLog("906",
        Map.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId(),
            lowerUnderscore(APP_CODE), "DAEA0",
            lowerUnderscore(DEVICE_NAME), TEST_DEVICE_NAME));
    assertEquals(TOO_OFTEN, appResponse.getError());
    // The payload must expose the retry schedule to the client.
    assertEquals("timestamp", appResponse.getPayload().get("next_registration_date"));
    assertEquals(5, appResponse.getPayload().get("blokkering_digid_app_aanvragen"));
}
/**
 * Tells whether the given topic is a consumer retry topic or a dead-letter
 * queue topic, judged by its well-known name prefix. Blank or null topics
 * are never retry/DLQ topics.
 */
public static boolean isRetryOrDlqTopic(String topic) {
    if (StringUtils.isBlank(topic)) {
        return false;
    }
    final boolean retryTopic = topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX);
    final boolean dlqTopic = topic.startsWith(MixAll.DLQ_GROUP_TOPIC_PREFIX);
    return retryTopic || dlqTopic;
}
@Test
public void testIsRetryOrDlqTopicWithNullTopic() {
    // A null topic is treated as blank and is therefore neither retry nor DLQ.
    String nullTopic = null;
    assertThat(BrokerMetricsManager.isRetryOrDlqTopic(nullTopic)).isFalse();
}
/**
 * Renders the (sorted, non-internal) config keys as reStructuredText,
 * one documented entry per key separated by blank lines.
 */
public String toRst() {
    final StringBuilder out = new StringBuilder();
    for (ConfigKey key : sortedConfigs()) {
        // Internal configs are deliberately excluded from generated docs.
        if (!key.internalConfig) {
            getConfigKeyRst(key, out);
            out.append("\n");
        }
    }
    return out.toString();
}
@Test
public void toRst() {
    final ConfigDef def = new ConfigDef()
        .define("opt1", Type.STRING, "a", ValidString.in("a", "b", "c"), Importance.HIGH, "docs1")
        .define("opt2", Type.INT, Importance.MEDIUM, "docs2")
        .define("opt3", Type.LIST, Arrays.asList("a", "b"), Importance.LOW, "docs3")
        .define("opt4", Type.BOOLEAN, false, Importance.LOW, null);

    // Expected ordering: opt2 first (required, no default), then the optional
    // keys by importance. opt4's null documentation renders no description line.
    final String expectedRst =
        "``opt2``\n" +
        " docs2\n" +
        "\n" +
        " * Type: int\n" +
        " * Importance: medium\n" +
        "\n" +
        "``opt1``\n" +
        " docs1\n" +
        "\n" +
        " * Type: string\n" +
        " * Default: a\n" +
        " * Valid Values: [a, b, c]\n" +
        " * Importance: high\n" +
        "\n" +
        "``opt3``\n" +
        " docs3\n" +
        "\n" +
        " * Type: list\n" +
        " * Default: a,b\n" +
        " * Importance: low\n" +
        "\n" +
        "``opt4``\n" +
        "\n" +
        " * Type: boolean\n" +
        " * Default: false\n" +
        " * Importance: low\n" +
        "\n";
    assertEquals(expectedRst, def.toRst());
}
/**
 * Returns a publisher over all map values; delegates to the pattern-based
 * overload with a null pattern, i.e. no key filtering.
 */
public Publisher<V> valueIterator() {
    return valueIterator(null);
}
@Test
public void testValueIterator() {
    RMapRx<Integer, Integer> map = redisson.getMap("simple");
    sync(map.put(1, 0));
    sync(map.put(3, 5));
    sync(map.put(4, 6));
    sync(map.put(7, 8));

    // Each stored value must be produced exactly once; remove() both checks
    // membership and enforces the exactly-once property.
    List<Integer> remaining = new ArrayList<Integer>(Arrays.asList(0, 5, 6, 8));
    Iterator<Integer> it = toIterator(map.valueIterator());
    while (it.hasNext()) {
        Integer produced = it.next();
        if (!remaining.remove(produced)) {
            Assertions.fail();
        }
    }
    Assertions.assertEquals(0, remaining.size());
}
/**
 * Deserializes the entity stream into an instance of {@code type}, using a
 * pre-registered reader when one exists and otherwise building one on demand.
 * The per-read string cache is always released, even when reading fails.
 */
public <T> T readValue(Class<T> type, InputStream entityStream) throws IOException {
    Supplier<ObjectReader> registered = objectReaderByClass.get(type);
    ObjectReader delegate = registered != null ? registered.get() : mapper.readerFor(type);
    ObjectReader reader = DeserializerStringCache.init(delegate);
    try {
        return reader.readValue(entityStream);
    } finally {
        DeserializerStringCache.clear(reader, CacheScope.GLOBAL_SCOPE);
    }
}
@Test public void testInstanceInfoXStreamEncodeJacksonDecode() throws Exception { InstanceInfo original = INSTANCE_INFO_1_A1; // Encode ByteArrayOutputStream captureStream = new ByteArrayOutputStream(); new EntityBodyConverter().write(original, captureStream, MediaType.APPLICATION_JSON_TYPE); byte[] encoded = captureStream.toByteArray(); // Decode InputStream source = new ByteArrayInputStream(encoded); InstanceInfo decoded = codec.readValue(InstanceInfo.class, source); assertTrue(EurekaEntityComparators.equal(decoded, original)); }
/**
 * @return the original text block this duplication was detected against
 */
public TextBlock getOriginal() {
    return this.original;
}
@Test
public void getOriginal_returns_original() {
    // getOriginal() must return the very instance passed to the constructor.
    Duplication duplication =
        new Duplication(SOME_ORIGINAL_TEXTBLOCK, Arrays.asList(new InnerDuplicate(TEXT_BLOCK_1)));
    assertThat(duplication.getOriginal()).isSameAs(SOME_ORIGINAL_TEXTBLOCK);
}