focal_method: string (lengths 13 – 60.9k)
test_case: string (lengths 25 – 109k)
@Override
public Collection<String> childNames() {
    // One child per known controller, rendered as the decimal form of its id.
    final List<String> names = new ArrayList<>();
    image.controllers().keySet().forEach(id -> names.add(id.toString()));
    return names;
}
@Test
public void testChildNames() {
    // NODE is backed by an image containing a single controller with id 2.
    final List<String> expected = Collections.singletonList("2");
    assertEquals(expected, NODE.childNames());
}
/**
 * Resolves the OP_TYPE of the named target field, consulting (in order) the
 * model Targets, the MiningSchema and the plain field list.
 *
 * <p>All three lookups are evaluated eagerly — exactly as in the original
 * Stream.of form, where the stream's arguments are computed before filtering —
 * and the first non-empty result wins.
 *
 * @throws KiePMMLInternalException when no source knows the field
 */
public static OP_TYPE getOpType(final List<Field<?>> fields, final Model model, final String targetFieldName) {
    final List<Optional<OP_TYPE>> candidates = Arrays.asList(
            getOpTypeFromTargets(model.getTargets(), targetFieldName),
            getOpTypeFromMiningFields(model.getMiningSchema(), targetFieldName),
            getOpTypeFromFields(fields, targetFieldName));
    for (Optional<OP_TYPE> candidate : candidates) {
        if (candidate.isPresent()) {
            return candidate.get();
        }
    }
    throw new KiePMMLInternalException(String.format("Failed to find OpType for field" + " %s", targetFieldName));
}
// Verifies that getOpType throws KiePMMLInternalException when the requested
// field name is absent from the Targets, the MiningSchema and the field list.
@Test
void getOpTypeByMiningFieldsNotFound() {
    assertThatExceptionOfType(KiePMMLInternalException.class).isThrownBy(() -> {
        final Model model = new RegressionModel();
        final DataDictionary dataDictionary = new DataDictionary();
        final MiningSchema miningSchema = new MiningSchema();
        // Populate three random fields, none of which is named "NOT_EXISTING".
        IntStream.range(0, 3).forEach(i -> {
            String fieldName = "field" + i;
            final DataField dataField = getRandomDataField();
            dataField.setName(fieldName);
            dataDictionary.addDataFields(dataField);
            final MiningField miningField = getRandomMiningField();
            miningField.setName(dataField.getName());
            miningSchema.addMiningFields(miningField);
        });
        model.setMiningSchema(miningSchema);
        // Lookup of an unknown field must fail with the internal exception.
        org.kie.pmml.compiler.api.utils.ModelUtils.getOpType(getFieldsFromDataDictionary(dataDictionary), model, "NOT_EXISTING");
    });
}
@Override @CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE, allEntries = true) // allEntries 清空所有缓存,因为可能修改到 code 字段,不好清理 public void updateNotifyTemplate(NotifyTemplateSaveReqVO updateReqVO) { // 校验存在 validateNotifyTemplateExists(updateReqVO.getId()); // 校验站内信编码是否重复 validateNotifyTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode()); // 更新 NotifyTemplateDO updateObj = BeanUtils.toBean(updateReqVO, NotifyTemplateDO.class); updateObj.setParams(parseTemplateContentParams(updateObj.getContent())); notifyTemplateMapper.updateById(updateObj); }
// Updating a template whose id is unknown must raise NOTIFY_TEMPLATE_NOT_EXISTS.
@Test
public void testUpdateNotifyTemplate_notExists() {
    // Prepare arguments: a random request whose id is not present in the store
    NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class);
    // Invoke and assert the expected service exception
    assertServiceException(() -> notifyTemplateService.updateNotifyTemplate(reqVO), NOTIFY_TEMPLATE_NOT_EXISTS);
}
@Override
public ConfigData get(String path) {
    // Delegates to the two-argument overload with a null key set — presumably
    // meaning "all keys"; confirm against the overload's contract.
    return get(path, null);
}
@Test
void testGetEnvVarsByKeyList() {
    // Request a specific subset of environment variables and verify exactly
    // those keys come back, nothing more.
    final Set<String> requested = new HashSet<>(Arrays.asList("test_var1", "secret_var2"));
    final Set<String> returned = envVarConfigProvider.get(null, requested).data().keySet();
    assertEquals(requested, returned);
}
/**
 * Lists all versions (and delete markers) of objects under the given directory,
 * paging through the versioned-object API chunk by chunk. Common prefixes are
 * resolved to folder placeholders concurrently on a dedicated thread pool.
 * Throws NotfoundException when no placeholder or object proves the directory
 * exists (AWS hostname check vs. a Minio-specific prefix probe).
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
    try {
        final String prefix = this.createPrefix(directory);
        if(log.isDebugEnabled()) {
            log.debug(String.format("List with prefix %s", prefix));
        }
        final Path bucket = containerService.getContainer(directory);
        final AttributedList<Path> objects = new AttributedList<>();
        // Pagination markers for listVersionedObjectsChunked.
        String priorLastKey = null;
        String priorLastVersionId = null;
        // Revision counter: reset whenever the key changes, incremented per version.
        long revision = 0L;
        String lastKey = null;
        boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
        do {
            final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
                bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER),
                new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"),
                priorLastKey, priorLastVersionId, false);
            // Amazon S3 returns object versions in the order in which they were stored,
            // with the most recently stored returned first.
            for(BaseVersionOrDeleteMarker marker : chunk.getItems()) {
                final String key = URIEncoder.decode(marker.getKey());
                if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
                    // The directory's own placeholder object — record it, don't list it.
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Skip placeholder key %s", key));
                    }
                    hasDirectoryPlaceholder = true;
                    continue;
                }
                final PathAttributes attr = new PathAttributes();
                attr.setVersionId(marker.getVersionId());
                if(!StringUtils.equals(lastKey, key)) {
                    // Reset revision for next file
                    revision = 0L;
                }
                attr.setRevision(++revision);
                // A version is a duplicate unless it is the latest non-delete-marker.
                attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
                if(marker.isDeleteMarker()) {
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
                }
                attr.setModificationDate(marker.getLastModified().getTime());
                attr.setRegion(bucket.attributes().getRegion());
                if(marker instanceof S3Version) {
                    final S3Version object = (S3Version) marker;
                    attr.setSize(object.getSize());
                    if(StringUtils.isNotBlank(object.getEtag())) {
                        attr.setETag(StringUtils.remove(object.getEtag(), "\""));
                        // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
                        // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
                        // not the MD5 of the object data.
                        attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
                    }
                    if(StringUtils.isNotBlank(object.getStorageClass())) {
                        attr.setStorageClass(object.getStorageClass());
                    }
                }
                final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(),
                    PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
                if(metadata) {
                    f.withAttributes(attributes.find(f));
                }
                objects.add(f);
                lastKey = key;
            }
            // Resolve common prefixes (sub-folders) concurrently.
            final String[] prefixes = chunk.getCommonPrefixes();
            final List<Future<Path>> folders = new ArrayList<>();
            for(String common : prefixes) {
                if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) {
                    continue;
                }
                folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
            }
            for(Future<Path> f : folders) {
                try {
                    objects.add(Uninterruptibles.getUninterruptibly(f));
                }
                catch(ExecutionException e) {
                    log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
                    // Prefer rethrowing a BackgroundException found in the cause chain.
                    for(Throwable cause : ExceptionUtils.getThrowableList(e)) {
                        Throwables.throwIfInstanceOf(cause, BackgroundException.class);
                    }
                    throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e));
                }
            }
            priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
            priorLastVersionId = chunk.getNextVersionIdMarker();
            // Notify the listener after each chunk so the UI can render incrementally.
            listener.chunk(directory, objects);
        }
        while(priorLastKey != null);
        if(!hasDirectoryPlaceholder && objects.isEmpty()) {
            // Nothing found and no placeholder: decide whether the directory exists at all.
            // Only for AWS
            if(S3Session.isAwsHostname(session.getHost().getHostname())) {
                if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("No placeholder found for directory %s", directory));
                    }
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
            else {
                // Handle missing prefix for directory placeholders in Minio
                final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(),
                    String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()),
                    String.valueOf(Path.DELIMITER), 1, null, null, false);
                if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
        }
        return objects;
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
// Lists the root of a virtual-host-style bucket and sanity-checks the attributes
// of every non-duplicate file entry (size, ETag, storage class, "null" version id).
@Test
public void testListVirtualHostStyle() throws Exception {
    final AttributedList<Path> list = new S3VersionedObjectListService(virtualhost, new S3AccessControlListFeature(virtualhost)).list(
        new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)), new DisabledListProgressListener());
    for(Path p : list) {
        // Every entry listed at the root must have the root as its parent.
        assertEquals(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)), p.getParent());
        if(p.isFile()) {
            assertNotEquals(-1L, p.attributes().getModificationDate());
            if(!p.attributes().isDuplicate()) {
                assertNotEquals(-1L, p.attributes().getSize());
                assertNotNull(p.attributes().getETag());
                assertNotNull(p.attributes().getStorageClass());
                // Unversioned buckets report the literal string "null" as version id.
                assertEquals("null", p.attributes().getVersionId());
            }
        }
    }
}
/**
 * Notifies every listener registered for the event's exact runtime class.
 * No-op when no listeners are registered for that class.
 */
@SuppressWarnings("unchecked")
public static <E> void notifyAll(E event) {
    Preconditions.checkNotNull(event, "Cannot notify listeners for a null event.");
    final Queue<Listener<?>> registered = LISTENERS.get(event.getClass());
    if (registered == null) {
        return;
    }
    for (Listener<?> raw : registered) {
        // Safe by registration contract: listeners are keyed by event class.
        ((Listener<E>) raw).notify(event);
    }
}
@Test
public void testEvent2() {
    // Publishing an Event2 must deliver that exact instance to the registered listener.
    final Event2 event = new Event2();
    Listeners.notifyAll(event);
    assertThat(TestListener.get().e2).isEqualTo(event);
}
/**
 * Blocks the calling thread until all declared dependencies of the given
 * service are satisfied, polling every 15 seconds.
 *
 * <p>Note: the method name preserves the historical misspelling ("Satisified")
 * because external callers depend on it.
 *
 * @param service the service whose dependencies are checked
 */
public static void checkServiceDependencySatisified(Service service) {
    while (!serviceDependencySatisfied(service)) {
        try {
            LOG.info("Waiting for service dependencies.");
            Thread.sleep(15000L);
        } catch (InterruptedException e) {
            // Fix: the original swallowed the interrupt, leaving the thread
            // un-interruptible and spinning. Restore the interrupt status and
            // stop waiting so callers can cancel this potentially unbounded wait.
            Thread.currentThread().interrupt();
            return;
        }
    }
}
// Verifies that checkServiceDependencySatisified blocks while a dependency ("abc")
// is unsatisfied: after one second the worker thread must still be alive/waiting.
@Test
public void testServiceDependencies() {
    Thread thread = new Thread() {
        @Override
        public void run() {
            Service service = createExampleApplication();
            Component compa = createComponent("compa");
            Component compb = createComponent("compb");
            service.addComponent(compa);
            service.addComponent(compb);
            // Declare a dependency that is never satisfied in this test.
            List<String> dependencies = new ArrayList<String>();
            dependencies.add("abc");
            service.setDependencies(dependencies);
            Service dependent = createExampleApplication();
            dependent.setState(ServiceState.STOPPED);
            // Blocks here: the "abc" dependency never resolves.
            ServiceApiUtil.checkServiceDependencySatisified(service);
        }
    };
    thread.start();
    try {
        // Give the worker time to enter the wait loop.
        Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
    // Still alive == still blocked on the unsatisfied dependency.
    Assert.assertTrue(thread.isAlive());
}
@Override public OverlayData createOverlayData(ComponentName remoteApp) { final OverlayData original = mOriginal.createOverlayData(remoteApp); if (original.isValid() || mFixInvalid) { final int backgroundLuminance = luminance(original.getPrimaryColor()); final int diff = backgroundLuminance - luminance(original.getPrimaryTextColor()); if (mRequiredTextColorDiff > Math.abs(diff)) { if (backgroundLuminance > GRAY_LUM) { // closer to white, text will be black original.setPrimaryTextColor(Color.BLACK); original.setSecondaryTextColor(Color.DKGRAY); } else { original.setPrimaryTextColor(Color.WHITE); original.setSecondaryTextColor(Color.LTGRAY); } } } return original; }
// With fixInvalid=true and gray-on-gray (zero-contrast) input, the normalizer
// must mutate and return the SAME instance with readable text colors applied.
@Test
public void testReturnsFixedIfInvalidButWasAskedToFix() {
    mUnderTest = new OverlayDataNormalizer(mOriginal, 96, true);
    OverlayData original = setupOriginal(Color.GRAY, Color.GRAY, Color.GRAY);
    final OverlayData fixed = mUnderTest.createOverlayData(mTestComponent);
    // The normalizer fixes in place rather than copying.
    Assert.assertSame(original, fixed);
    Assert.assertTrue(fixed.isValid());
    // Background colors stay untouched...
    Assert.assertEquals(Color.GRAY, fixed.getPrimaryColor());
    Assert.assertEquals(Color.GRAY, fixed.getPrimaryDarkColor());
    // ...but text colors are forced to the light-on-dark pair (gray is at or
    // below the luminance threshold here).
    Assert.assertEquals(Color.WHITE, fixed.getPrimaryTextColor());
    Assert.assertEquals(Color.LTGRAY, fixed.getSecondaryTextColor());
}
/**
 * Verifies the personal access token has read access by issuing an
 * authenticated GET against the repository list endpoint; any auth failure
 * surfaces from doGet.
 */
public void validateReadPermission(String serverUrl, String personalAccessToken) {
    final HttpUrl reposUrl = buildUrl(serverUrl, "/rest/api/1.0/repos");
    doGet(personalAccessToken, reposUrl, payload -> buildGson().fromJson(payload, RepositoryList.class));
}
// A 401 from Bitbucket must surface as IllegalArgumentException with the
// user-facing message "Invalid personal access token".
@Test
public void fail_validate_read_permission_when_permissions_are_not_granted() {
    server.enqueue(new MockResponse().setResponseCode(401)
        .setBody("com.atlassian.bitbucket.AuthorisationException You are not permitted to access this resource"));
    String serverUrl = server.url("/").toString();
    assertThatThrownBy(() -> underTest.validateReadPermission(serverUrl, "token"))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Invalid personal access token");
}
public Collection<PlanCoordinator> coordinators() {
    // Read-only live view over the tracked coordinators; callers cannot mutate it.
    return Collections.unmodifiableCollection(mCoordinators.values());
}
// With a long (24h) retention time, finished plans are not purged, so filling the
// tracker to capacity (10) and adding one more must exhaust the resource.
@Test
public void testRetentionTime() throws Exception {
    long retentionMs = FormatUtils.parseTimeSize("24h");
    PlanTracker tracker = new PlanTracker(10, retentionMs, -1, mMockWorkflowTracker);
    assertEquals("tracker should be empty", 0, tracker.coordinators().size());
    fillJobTracker(tracker, 10);
    finishAllJobs(tracker);
    // Finished jobs are still within retention, so capacity remains consumed.
    mException.expect(ResourceExhaustedException.class);
    addJob(tracker, 100);
}
/**
 * Returns the name-server stream for the most specific configured domain
 * matching the hostname, walking up the domain hierarchy one label at a time
 * and falling back to the default servers when no domain matches.
 */
@Override
public DnsServerAddressStream nameServerAddressStream(String hostname) {
    String remaining = hostname;
    while (true) {
        // No further dot (or only a trailing one): no more parent domains to try.
        final int dot = remaining.indexOf('.', 1);
        if (dot < 0 || dot == remaining.length() - 1) {
            return defaultNameServerAddresses.stream();
        }
        final DnsServerAddresses match = domainToNameServerStreamMap.get(remaining);
        if (match != null) {
            return match.stream();
        }
        // Strip the leading label and retry with the parent domain.
        remaining = remaining.substring(dot + 1);
    }
}
// With "options rotate", each call to nameServerAddressStream must hand out the
// configured servers starting at a rotating offset: .2, then .3, then .4, then
// wrapping back to .2.
@Test
public void nameServerAddressStreamShouldBeRotationalWhenRotationOptionsIsPresent(
        @TempDir Path tempDir) throws Exception {
    File f = buildFile(tempDir, "options rotate\n" +
            "domain linecorp.local\n" +
            "nameserver 127.0.0.2\n" +
            "nameserver 127.0.0.3\n" +
            "nameserver 127.0.0.4\n");
    UnixResolverDnsServerAddressStreamProvider p = new UnixResolverDnsServerAddressStreamProvider(f, null);
    // First stream: rotation offset 0.
    DnsServerAddressStream stream = p.nameServerAddressStream("");
    assertHostNameEquals("127.0.0.2", stream.next());
    assertHostNameEquals("127.0.0.3", stream.next());
    assertHostNameEquals("127.0.0.4", stream.next());
    // Second stream: offset advanced by one.
    stream = p.nameServerAddressStream("");
    assertHostNameEquals("127.0.0.3", stream.next());
    assertHostNameEquals("127.0.0.4", stream.next());
    assertHostNameEquals("127.0.0.2", stream.next());
    // Third stream: offset advanced again.
    stream = p.nameServerAddressStream("");
    assertHostNameEquals("127.0.0.4", stream.next());
    assertHostNameEquals("127.0.0.2", stream.next());
    assertHostNameEquals("127.0.0.3", stream.next());
    // Fourth stream: rotation wraps back to the start.
    stream = p.nameServerAddressStream("");
    assertHostNameEquals("127.0.0.2", stream.next());
    assertHostNameEquals("127.0.0.3", stream.next());
    assertHostNameEquals("127.0.0.4", stream.next());
}
/**
 * Converts BD-09 (Baidu) coordinates to WGS-84 by chaining the two pairwise
 * conversions: BD-09 -> GCJ-02 -> WGS-84.
 */
public static Coordinate bd09toWgs84(double lng, double lat) {
    final Coordinate intermediate = bd09ToGcj02(lng, lat);
    return gcj02ToWgs84(intermediate.lng, intermediate.lat);
}
// Known-point check for the BD-09 -> WGS-84 conversion (Beijing-area sample).
@Test
public void bd09toWgs84Test() {
    final CoordinateUtil.Coordinate coordinate = CoordinateUtil.bd09toWgs84(116.404, 39.915);
    // Exact comparison (delta 0): the conversion is deterministic double math.
    assertEquals(116.3913836995125D, coordinate.getLng(), 0);
    assertEquals(39.907253214522164D, coordinate.getLat(), 0);
}
public double calculateMinPercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) { if (LOG.isTraceEnabled()) { LOG.trace("Calculating min percentage used by. Used Mem: {} Total Mem: {}" + " Used Normalized Resources: {} Total Normalized Resources: {}", totalMemoryMb, usedMemoryMb, toNormalizedMap(), used.toNormalizedMap()); } double min = 1.0; if (usedMemoryMb > totalMemoryMb) { throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb); } if (totalMemoryMb != 0.0) { min = Math.min(min, usedMemoryMb / totalMemoryMb); } double totalCpu = getTotalCpu(); if (used.getTotalCpu() > totalCpu) { throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb); } if (totalCpu != 0.0) { min = Math.min(min, used.getTotalCpu() / totalCpu); } if (used.otherResources.length > otherResources.length) { throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb); } for (int i = 0; i < otherResources.length; i++) { if (otherResources[i] == 0.0) { //Skip any resources where the total is 0, the percent used for this resource isn't meaningful. //We fall back to prioritizing by cpu, memory and any other resources by ignoring this value continue; } if (i >= used.otherResources.length) { //Resources missing from used are using none of that resource return 0; } if (used.otherResources[i] > otherResources[i]) { String info = String.format("%s, %f > %f", getResourceNameForResourceIndex(i), used.otherResources[i], otherResources[i]); throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb, info); } min = Math.min(min, used.otherResources[i] / otherResources[i]); } return min * 100.0; }
// A resource present in the totals (GPU) but absent from the used set counts as
// 0% used, dragging the overall minimum down to 0 regardless of CPU/memory usage.
@Test
public void testCalculateMinWithUnusedResource() {
    Map<String, Double> allResourcesMap = new HashMap<>();
    allResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, 2.0);
    allResourcesMap.put(gpuResourceName, 10.0);
    NormalizedResources resources = new NormalizedResources(normalize(allResourcesMap));
    Map<String, Double> usedResourcesMap = new HashMap<>();
    usedResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, 1.0);
    NormalizedResources usedResources = new NormalizedResources(normalize(usedResourcesMap));
    double min = resources.calculateMinPercentageUsedBy(usedResources, 4, 1);
    //The resource that is not used should count as if it is being used 0%
    assertThat(min, is(0.0));
}
public static <T> PaginatedResponse<T> create(String listKey, PaginatedList<T> paginatedList) {
    // Convenience factory: no query string and no extra context attached.
    return new PaginatedResponse<>(listKey, paginatedList, null, null);
}
// Serializes a PaginatedResponse carrying a query string and a context map, then
// checks every top-level JSON field, the named list ("foo") and the context entry.
@Test
public void serializeWithQueryAndContext() throws Exception {
    final ImmutableList<String> values = ImmutableList.of("hello", "world");
    final ImmutableMap<String, Object> context = ImmutableMap.of("context1", "wow");
    final PaginatedList<String> paginatedList = new PaginatedList<>(values, values.size(), 1, 10);
    final PaginatedResponse<String> response = PaginatedResponse.create("foo", paginatedList, "query1", context);
    final DocumentContext ctx = JsonPath.parse(objectMapper.writeValueAsString(response));
    final JsonPathAssert jsonPathAssert = JsonPathAssert.assertThat(ctx);
    jsonPathAssert.jsonPathAsString("$.query").isEqualTo("query1");
    jsonPathAssert.jsonPathAsInteger("$.total").isEqualTo(2);
    jsonPathAssert.jsonPathAsInteger("$.count").isEqualTo(2);
    jsonPathAssert.jsonPathAsInteger("$.page").isEqualTo(1);
    jsonPathAssert.jsonPathAsInteger("$.per_page").isEqualTo(10);
    // The list is emitted under the caller-supplied key.
    jsonPathAssert.jsonPathAsString("$.foo[0]").isEqualTo("hello");
    jsonPathAssert.jsonPathAsString("$.foo[1]").isEqualTo("world");
    jsonPathAssert.jsonPathAsString("$.context.context1").isEqualTo("wow");
}
/**
 * Builds a Pub/Sub read that includes message attributes and maps each
 * {@code PubsubMessage} to {@code T} via {@code parseFn}, with output elements
 * encoded by the supplied {@code coder}.
 */
public static <T> Read<T> readMessagesWithAttributesWithCoderAndParseFn(
    Coder<T> coder, SimpleFunction<PubsubMessage, T> parseFn) {
  return Read.newBuilder(parseFn).setCoder(coder).setNeedsAttributes(true).build();
}
// End-to-end read: messages carry a suffix attribute, and the parse fn appends
// that attribute's value to the payload string.
@Test
public void testReadMessagesWithAttributesWithCoderAndParseFn() {
    ImmutableList<IncomingMessage> inputs = ImmutableList.of(
        messageWithSuffixAttribute("foo", "-some-suffix"),
        messageWithSuffixAttribute("bar", "-some-other-suffix"));
    clientFactory = PubsubTestClient.createFactoryForPull(CLOCK, SUBSCRIPTION, 60, inputs);
    PCollection<String> read = pipeline.apply(
        PubsubIO.readMessagesWithAttributesWithCoderAndParseFn(
            StringUtf8Coder.of(), new AppendSuffixAttributeToStringPayloadParseFn())
            .fromSubscription(SUBSCRIPTION.getPath())
            .withClock(CLOCK)
            .withClientFactory(clientFactory));
    // Each output is payload + suffix attribute value.
    List<String> outputs = ImmutableList.of("foo-some-suffix", "bar-some-other-suffix");
    PAssert.that(read).containsInAnyOrder(outputs);
    pipeline.run();
}
/**
 * Attempts to activate the given work for its sharded key.
 *
 * Returns EXECUTE when the key had no queued work (caller should run it now),
 * DUPLICATE when the same work id is already queued, STALE when a queued entry
 * with the same cache token holds a newer-or-equal work token, and QUEUED when
 * the work was appended behind existing active work. Stale, non-active queued
 * entries with the same cache token are evicted along the way.
 */
synchronized ActivateWorkResult activateWorkForKey(ExecutableWork executableWork) {
    ShardedKey shardedKey = executableWork.work().getShardedKey();
    Deque<ExecutableWork> workQueue = activeWork.getOrDefault(shardedKey, new ArrayDeque<>());
    // This key does not have any work queued up on it. Create one, insert Work, and mark the work
    // to be executed.
    if (!activeWork.containsKey(shardedKey) || workQueue.isEmpty()) {
        workQueue.addLast(executableWork);
        activeWork.put(shardedKey, workQueue);
        incrementActiveWorkBudget(executableWork.work());
        return ActivateWorkResult.EXECUTE;
    }
    // Check to see if we have this work token queued.
    Iterator<ExecutableWork> workIterator = workQueue.iterator();
    while (workIterator.hasNext()) {
        ExecutableWork queuedWork = workIterator.next();
        if (queuedWork.id().equals(executableWork.id())) {
            return ActivateWorkResult.DUPLICATE;
        }
        if (queuedWork.id().cacheToken() == executableWork.id().cacheToken()) {
            if (executableWork.id().workToken() > queuedWork.id().workToken()) {
                // Check to see if the queuedWork is active. We only want to remove it if it is NOT
                // currently active.
                if (!queuedWork.equals(workQueue.peek())) {
                    workIterator.remove();
                    decrementActiveWorkBudget(queuedWork.work());
                }
                // Continue here to possibly remove more non-active stale work that is queued.
            } else {
                // The incoming work token is not newer for this cache token: reject as stale.
                return ActivateWorkResult.STALE;
            }
        }
    }
    // Queue the work for later processing.
    workQueue.addLast(executableWork);
    incrementActiveWorkBudget(executableWork.work());
    return ActivateWorkResult.QUEUED;
}
// When new work shares a cache token with queued work but carries a LOWER work
// token, activation must report STALE and leave the original work queued.
@Test
public void testActivateWorkForKey_matchingCacheTokens_newWorkTokenLesser_STALE() {
    long cacheToken = 1L;
    long queuedWorkToken = 10L;
    long newWorkToken = queuedWorkToken / 2;
    ShardedKey shardedKey = shardedKey("someKey", 1L);
    ExecutableWork queuedWork = createWork(createWorkItem(queuedWorkToken, cacheToken, shardedKey));
    ExecutableWork newWork = createWork(createWorkItem(newWorkToken, cacheToken, shardedKey));
    activeWorkState.activateWorkForKey(queuedWork);
    ActivateWorkResult activateWorkResult = activeWorkState.activateWorkForKey(newWork);
    assertEquals(ActivateWorkResult.STALE, activateWorkResult);
    // The stale work must not be queued; the original work stays at the head.
    assertFalse(readOnlyActiveWork.get(shardedKey).contains(newWork));
    assertEquals(queuedWork, readOnlyActiveWork.get(shardedKey).peek());
}
/**
 * Builds a FEEL 1.1 parser for the given source, wiring event listeners,
 * custom error handling, additional functions into the built-in scope, and
 * pre-loading the declared input variables/types. An optional type registry
 * is attached to the parser helper when provided.
 */
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
    CommonTokenStream tokens = new CommonTokenStream( lexer );
    FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
    ParserHelper parserHelper = new ParserHelper(eventsManager);
    // Register extra functions in the built-in scope before the parser runs.
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler( new FEELErrorHandler() );
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
    // pre-loads the parser with symbols
    defineVariables( inputVariableTypes, inputVariables, parser );
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
// An @-literal carrying a date-and-time payload must parse to an AtLiteralNode
// typed DATE_TIME, with a source location spanning the full input.
@Test
void atLiteralDateAndTime() {
    String inputExpression = "@\"2016-07-29T05:48:23\"";
    BaseNode bool = parse(inputExpression);
    assertThat(bool).isInstanceOf(AtLiteralNode.class);
    assertThat(bool.getResultType()).isEqualTo(BuiltInType.DATE_TIME);
    assertLocation(inputExpression, bool);
}
@Override
public PollResult poll(long currentTimeMs) {
    // Build fetch requests for the fetchable partitions and delegate; the
    // success/failure handlers complete the fetch bookkeeping.
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
// Control records (here a transaction-abort marker) must be skipped by the
// fetcher: only the one data record is returned, yet the position advances
// past the control record to offset 2.
@Test
public void testFetcherIgnoresControlRecords() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    long producerId = 1;
    short producerEpoch = 0;
    int baseSequence = 0;
    int partitionLeaderEpoch = 0;
    // One idempotent data record followed by an ABORT control marker.
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    MemoryRecordsBuilder builder = MemoryRecords.idempotentBuilder(buffer, Compression.NONE, 0L, producerId, producerEpoch, baseSequence);
    builder.append(0L, "key".getBytes(), null);
    builder.close();
    MemoryRecords.writeEndTransactionalMarker(buffer, 1L, time.milliseconds(), partitionLeaderEpoch, producerId, producerEpoch,
        new EndTransactionMarker(ControlRecordType.ABORT, 0));
    buffer.flip();
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp0);
    assertEquals(1, records.size());
    // Position skips the control record.
    assertEquals(2L, subscriptions.position(tp0).offset);
    ConsumerRecord<byte[], byte[]> record = records.get(0);
    assertArrayEquals("key".getBytes(), record.key());
}
@VisibleForTesting
HiveConf hiveConf() {
    // Exposes the pool's effective configuration for test assertions only.
    return hiveConf;
}
// hiveConf() must expose the configuration the pool was built with, including
// programmatic overrides (warehouse dir) and values loaded from hive-site.xml.
@Test
public void testConf() {
    HiveConf conf = createHiveConf();
    conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "file:/mywarehouse/");
    HiveClientPool clientPool = new HiveClientPool(10, conf);
    HiveConf clientConf = clientPool.hiveConf();
    assertThat(clientConf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname))
        .isEqualTo(conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname));
    assertThat(clientPool.poolSize()).isEqualTo(10);
    // 'hive.metastore.sasl.enabled' should be 'true' as defined in xml
    assertThat(clientConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname))
        .isEqualTo(conf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
    assertThat(clientConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)).isTrue();
}
/**
 * Returns the configured location type, defaulting based on which coordinates
 * are present: geo when a latitude exists, grid when a grid-x exists,
 * otherwise none.
 */
public String locType() {
    final String latitude = get(LATITUDE, null);
    final String gridX = get(GRID_X, null);
    final String fallback;
    if (latitude != null) {
        fallback = LOC_TYPE_GEO;
    } else if (gridX != null) {
        fallback = LOC_TYPE_GRID;
    } else {
        fallback = LOC_TYPE_NONE;
    }
    return get(LOC_TYPE, fallback);
}
// With neither latitude nor grid coordinates configured, locType() must fall
// back to LOC_TYPE_NONE.
@Test
public void defaultLocationType() {
    print(cfg);
    assertEquals("not none", BasicElementConfig.LOC_TYPE_NONE, cfg.locType());
}
/**
 * Creates a serializable Supplier producing KvProjectors over the given
 * key/value paths, types and upsert target descriptors. failOnNulls controls
 * null handling — semantics defined by the Supplier; confirm against its
 * implementation.
 */
public static Supplier supplier(
        QueryPath[] paths,
        QueryDataType[] types,
        UpsertTargetDescriptor keyDescriptor,
        UpsertTargetDescriptor valueDescriptor,
        boolean failOnNulls
) {
    return new Supplier(paths, types, keyDescriptor, valueDescriptor, failOnNulls);
}
// Round-trips the projector Supplier through Hazelcast serialization and checks
// field-by-field equality with the original.
@Test
public void test_supplierSerialization() {
    InternalSerializationService serializationService = new DefaultSerializationServiceBuilder().build();
    KvProjector.Supplier original = KvProjector.supplier(
        new QueryPath[]{QueryPath.KEY_PATH, QueryPath.VALUE_PATH},
        new QueryDataType[]{QueryDataType.INT, QueryDataType.VARCHAR},
        PrimitiveUpsertTargetDescriptor.INSTANCE,
        PrimitiveUpsertTargetDescriptor.INSTANCE,
        true
    );
    KvProjector.Supplier serialized = serializationService.toObject(serializationService.toData(original));
    assertThat(serialized).isEqualToComparingFieldByField(original);
}
@Override
public List<Plugin> plugins() {
    // Parse the comma-separated plugin property into PluginOptions (trimming each
    // entry), falling back to an empty mutable list when unset; then append the
    // publish plugin when one is configured.
    List<Plugin> plugins = configurationParameters.get(PLUGIN_PROPERTY_NAME, s -> Arrays.stream(s.split(","))
            .map(String::trim)
            .map(PluginOption::parse)
            .map(pluginOption -> (Plugin) pluginOption)
            .collect(Collectors.toList()))
            .orElseGet(ArrayList::new);
    getPublishPlugin()
            .ifPresent(plugins::add);
    return plugins;
}
// plugins() must parse the comma-separated plugin property, trimming whitespace
// around each entry and preserving paths that themselves contain spaces.
@Test
void getPluginNames() {
    ConfigurationParameters config = new MapConfigurationParameters(
        Constants.PLUGIN_PROPERTY_NAME, "html:path/to/report.html");
    assertThat(new CucumberEngineOptions(config).plugins().stream()
        .map(Options.Plugin::pluginString)
        .collect(toList()), hasItem("html:path/to/report.html"));
    // Two plugins separated by ", " — the separator is trimmed, inner spaces kept.
    CucumberEngineOptions htmlAndJson = new CucumberEngineOptions(
        new MapConfigurationParameters(Constants.PLUGIN_PROPERTY_NAME,
            "html:path/with spaces/to/report.html, message:path/with spaces/to/report.ndjson"));
    assertThat(htmlAndJson.plugins().stream()
        .map(Options.Plugin::pluginString)
        .collect(toList()), hasItems("html:path/with spaces/to/report.html", "message:path/with spaces/to/report.ndjson"));
}
/**
 * Scans the classpath for every occurrence of the named resource, restricted
 * to packages accepted by {@code packageFilter}.
 *
 * @throws NullPointerException when either argument is null
 */
public List<R> scanForClasspathResource(String resourceName, Predicate<String> packageFilter) {
    requireNonNull(resourceName, "resourceName must not be null");
    requireNonNull(packageFilter, "packageFilter must not be null");
    // Resolve all URIs providing the resource, then materialize them as
    // resources rooted at the default package.
    List<URI> urisForResource = getUrisForResource(getClassLoader(), resourceName);
    BiFunction<Path, Path, Resource> createResource = createClasspathResource(resourceName);
    return findResourcesForUris(urisForResource, DEFAULT_PACKAGE_NAME, packageFilter, createResource);
}
// A resource present on the classpath must be found and reported with a
// classpath: URI.
@Test
void scanForClasspathResource() {
    String resourceName = "io/cucumber/core/resource/test/resource.txt";
    List<URI> resources = resourceScanner.scanForClasspathResource(resourceName, aPackage -> true);
    assertThat(resources, contains(URI.create("classpath:io/cucumber/core/resource/test/resource.txt")));
}
public B owner(String owner) {
    // Fluent setter; getThis() preserves the concrete builder type for chaining.
    this.owner = owner;
    return getThis();
}
@Test
void owner() {
    // Setting the owner on the builder must propagate to the built interface.
    final InterfaceBuilder interfaceBuilder = new InterfaceBuilder();
    interfaceBuilder.owner("owner");
    Assertions.assertEquals("owner", interfaceBuilder.build().getOwner());
}
/**
 * Unregisters every plugin belonging to the bundle containing the given
 * descriptor's first plugin. Throws RuntimeException when the plugin (or any
 * plugin within its bundle) is not currently registered.
 *
 * @return the bundle that was actually removed (may list more plugins than
 *         the argument)
 */
public GoPluginBundleDescriptor unloadPlugin(GoPluginBundleDescriptor bundleDescriptor) {
    final GoPluginDescriptor firstPluginDescriptor = bundleDescriptor.descriptors().get(0);
    // Locate the registered plugin matching the first descriptor; its bundle is
    // the one to remove.
    final GoPluginDescriptor pluginInBundle = getPluginByIdOrFileName(firstPluginDescriptor.id(), firstPluginDescriptor.fileName());
    if (pluginInBundle == null) {
        throw new RuntimeException("Could not find existing plugin with ID: " + firstPluginDescriptor.id());
    }
    final GoPluginBundleDescriptor bundleToRemove = pluginInBundle.bundleDescriptor();
    // Verify every plugin in the bundle is registered before mutating anything.
    for (GoPluginDescriptor pluginDescriptor : bundleToRemove.descriptors()) {
        if (getPluginByIdOrFileName(pluginDescriptor.id(), pluginDescriptor.fileName()) == null) {
            throw new RuntimeException("Could not find existing plugin with ID: " + pluginDescriptor.id());
        }
    }
    // Registry keys are stored lower-cased; remove all of them.
    for (GoPluginDescriptor pluginDescriptor : bundleToRemove.descriptors()) {
        idToDescriptorMap.remove(pluginDescriptor.id().toLowerCase());
    }
    return bundleToRemove;
}
// Unloading a bundle whose plugin was never registered must fail fast with a
// RuntimeException.
@Test
void shouldNotUnloadAPluginIfItWasNotLoadedBefore() {
    assertThatCode(() -> registry.unloadPlugin(new GoPluginBundleDescriptor(GoPluginDescriptor.builder().id("id1").isBundledPlugin(true).build())))
        .isInstanceOf(RuntimeException.class);
}
@Override
public void execute(
        MailOptions mailOptions,
        final ThrowingRunnable<? extends Exception> command,
        final String descriptionFormat,
        final Object... descriptionArgs) {
    // Wrap the command as a Mail at this executor's priority; a closed mailbox
    // is reported to callers via the standard RejectedExecutionException.
    try {
        mailbox.put(
                new Mail(
                        mailOptions,
                        command,
                        priority,
                        actionExecutor,
                        descriptionFormat,
                        descriptionArgs));
    } catch (MailboxClosedException mbex) {
        throw new RejectedExecutionException(mbex);
    }
}
// A command submitted through the executor from another thread must only be
// enqueued; draining the mailbox on this thread actually runs it.
@Test
void testOperations() throws Exception {
    AtomicBoolean wasExecuted = new AtomicBoolean(false);
    // Submit from a different thread; execute() enqueues without running.
    CompletableFuture.runAsync(
            () -> mailboxExecutor.execute(() -> wasExecuted.set(true), ""), otherThreadExecutor)
        .get();
    // Taking and running the mail performs the command.
    mailbox.take(DEFAULT_PRIORITY).run();
    assertThat(wasExecuted).isTrue();
}
/**
 * Tokenizes the input string and reports both the resulting sequence length
 * and the embedding latency (milliseconds) to the runtime metrics.
 */
@Override
public List<Integer> embed(String s, Context context) {
    final long startNanos = System.nanoTime();
    final List<Integer> tokenIds = tokenizer.embed(s, context);
    runtime.sampleSequenceLength(tokenIds.size(), context);
    final double elapsedMillis = (System.nanoTime() - startNanos) / 1_000_000d;
    runtime.sampleEmbeddingLatency(elapsedMillis, context);
    return tokenIds;
}
// Exercises the embedder across target tensor types: float output values,
// binarized int8 packing (thresholded bits packed into bytes), size limits
// relative to the model's output width, and bfloat16 output.
@Test
public void testEmbedder() {
    var context = new Embedder.Context("schema.indexing");
    String input = "This is a test";
    Tensor expected = Tensor.from("tensor<float>(x[8]):[-0.666, 0.335, 0.227, 0.0919, -0.069, 0.323, 0.422, 0.270]");
    Tensor result = embedder.embed(input, context, TensorType.fromSpec(("tensor<float>(x[8])")));
    for(int i = 0; i < 8; i++) {
        assertEquals(expected.get(TensorAddress.of(i)), result.get(TensorAddress.of(i)), 1e-2);
    }
    // Thresholding on the above gives [0, 1, 1, 1, 0, 1, 1, 1] which is packed into 119 (int8)
    Tensor binarizedResult = embedder.embed(input, context, TensorType.fromSpec(("tensor<int8>(x[1])")));
    assertEquals("tensor<int8>(x[1]):[119]", binarizedResult.toString());
    binarizedResult = embedder.embed(input, context, TensorType.fromSpec(("tensor<int8>(x[2])")));
    assertEquals("tensor<int8>(x[2]):[119, 44]", binarizedResult.toAbbreviatedString());
    binarizedResult = embedder.embed(input, context, TensorType.fromSpec(("tensor<int8>(x[48])")));
    assertTrue(binarizedResult.toAbbreviatedString().startsWith("tensor<int8>(x[48]):[119, 44"));
    assertThrows(IllegalArgumentException.class, () -> {
        // throws because the target tensor type is not compatible with the model output
        //49*8 > 384
        embedder.embed(input, context, TensorType.fromSpec(("tensor<int8>(x[49])")));
    });
    // bfloat16 single-element output carries the first float value.
    Tensor float16Result = embedder.embed(input, context, TensorType.fromSpec(("tensor<bfloat16>(x[1])")));
    assertEquals(-0.666, float16Result.sum().asDouble(),1e-3);
}
@Override
public MergeAppend appendFile(DataFile file) {
    // Stage the file for the pending append; return this for fluent chaining.
    add(file);
    return this;
}
// Verifies commit recovery: after 3 injected commit failures, the retried append succeeds,
// the manifest written for the pending snapshot is reused (not rewritten), and v1/v2
// sequence-number bookkeeping advances as expected (v2 increments, v1 stays 0).
@TestTemplate public void testRecovery() { // merge all manifests for this test table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit(); assertThat(readMetadata().lastSequenceNumber()).isEqualTo(0); Snapshot current = commit(table, table.newAppend().appendFile(FILE_A), branch); TableMetadata base = readMetadata(); long baseId = current.snapshotId(); V2Assert.assertEquals( "Last sequence number should be 1", 1, readMetadata().lastSequenceNumber()); V1Assert.assertEquals( "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber()); ManifestFile initialManifest = current.allManifests(table.io()).get(0); validateManifest( initialManifest, dataSeqs(1L), fileSeqs(1L), ids(baseId), files(FILE_A), statuses(Status.ADDED)); table.ops().failCommits(3); AppendFiles append = table.newAppend().appendFile(FILE_B); Snapshot pending = apply(append, branch); assertThat(pending.allManifests(table.io())).hasSize(1); ManifestFile newManifest = pending.allManifests(table.io()).get(0); assertThat(new File(newManifest.path())).exists(); validateManifest( newManifest, ids(pending.snapshotId(), baseId), concat(files(FILE_B), files(initialManifest))); V2Assert.assertEquals( "Snapshot sequence number should be 1", 1, latestSnapshot(table, branch).sequenceNumber()); V2Assert.assertEquals( "Last sequence number should be 1", 1, readMetadata().lastSequenceNumber()); V1Assert.assertEquals( "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber()); Snapshot snapshot = commit(table, append, branch); long snapshotId = snapshot.snapshotId(); V2Assert.assertEquals("Snapshot sequence number should be 2", 2, snapshot.sequenceNumber()); V2Assert.assertEquals( "Last sequence number should be 2", 2, readMetadata().lastSequenceNumber()); V1Assert.assertEquals( "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber()); TableMetadata metadata = readMetadata(); assertThat(new 
File(newManifest.path())).exists(); assertThat(snapshot.allManifests(table.io())).containsExactly(newManifest); assertThat(snapshot.allManifests(table.io())).hasSize(1); ManifestFile manifestFile = snapshot.allManifests(table.io()).get(0); validateManifest( manifestFile, dataSeqs(2L, 1L), fileSeqs(2L, 1L), ids(snapshotId, baseId), files(FILE_B, FILE_A), statuses(Status.ADDED, Status.EXISTING)); }
/**
 * Runs every known crash-analysis rule against the given log and collects a
 * {@link Result} for each rule whose pattern matches.
 *
 * <p>NOTE(review): the method name "anaylze" is a typo for "analyze", but it is
 * part of the public API referenced by callers, so it is kept for compatibility.
 */
public static Set<Result> anaylze(String log) {
    Set<Result> matched = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            matched.add(new Result(rule, log, matcher));
        }
    }
    return matched;
}
// Checks that the LOADING_CRASHED_FORGE rule extracts the mod name and id from a
// Forge loader-exception crash report fixture.
@Test public void loaderExceptionModCrash3() throws IOException { CrashReportAnalyzer.Result result = findResultByRule( CrashReportAnalyzer.anaylze(loadLog("/crash-report/loader_exception_mod_crash3.txt")), CrashReportAnalyzer.Rule.LOADING_CRASHED_FORGE); assertEquals("SuperOres", result.getMatcher().group("name")); assertEquals("superores", result.getMatcher().group("id")); }
// Translates an incoming rule-engine message into a device RPC request.
// Fails fast when the originator is not a device or the JSON body lacks "method"/"params".
// Otherwise it pulls RPC options from metadata (oneway, persistent, requestUUID,
// originServiceId, expiration time, retries — each falling back to a default when absent),
// builds the request, and sends it; the async response is enqueued to the SUCCESS chain
// on success or to FAILURE with the error name otherwise. The message is acked immediately
// after dispatch (delivery is handled by the RPC service callback, not by this node).
@Override public void onMsg(TbContext ctx, TbMsg msg) { JsonObject json = JsonParser.parseString(msg.getData()).getAsJsonObject(); String tmp; if (msg.getOriginator().getEntityType() != EntityType.DEVICE) { ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!")); } else if (!json.has("method")) { ctx.tellFailure(msg, new RuntimeException("Method is not present in the message!")); } else if (!json.has("params")) { ctx.tellFailure(msg, new RuntimeException("Params are not present in the message!")); } else { int requestId = json.has("requestId") ? json.get("requestId").getAsInt() : random.nextInt(); boolean restApiCall = msg.isTypeOf(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE); tmp = msg.getMetaData().getValue("oneway"); boolean oneway = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp); tmp = msg.getMetaData().getValue(DataConstants.PERSISTENT); boolean persisted = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp); tmp = msg.getMetaData().getValue("requestUUID"); UUID requestUUID = !StringUtils.isEmpty(tmp) ? UUID.fromString(tmp) : Uuids.timeBased(); tmp = msg.getMetaData().getValue("originServiceId"); String originServiceId = !StringUtils.isEmpty(tmp) ? tmp : null; tmp = msg.getMetaData().getValue(DataConstants.EXPIRATION_TIME); long expirationTime = !StringUtils.isEmpty(tmp) ? Long.parseLong(tmp) : (System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(config.getTimeoutInSeconds())); tmp = msg.getMetaData().getValue(DataConstants.RETRIES); Integer retries = !StringUtils.isEmpty(tmp) ? 
Integer.parseInt(tmp) : null; String params = parseJsonData(json.get("params")); String additionalInfo = parseJsonData(json.get(DataConstants.ADDITIONAL_INFO)); RuleEngineDeviceRpcRequest request = RuleEngineDeviceRpcRequest.builder() .oneway(oneway) .method(json.get("method").getAsString()) .body(params) .tenantId(ctx.getTenantId()) .deviceId(new DeviceId(msg.getOriginator().getId())) .requestId(requestId) .requestUUID(requestUUID) .originServiceId(originServiceId) .expirationTime(expirationTime) .retries(retries) .restApiCall(restApiCall) .persisted(persisted) .additionalInfo(additionalInfo) .build(); ctx.getRpcService().sendRpcRequestToDevice(request, ruleEngineDeviceRpcResponse -> { if (ruleEngineDeviceRpcResponse.getError().isEmpty()) { TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), ruleEngineDeviceRpcResponse.getResponse().orElse(TbMsg.EMPTY_JSON_OBJECT)); ctx.enqueueForTellNext(next, TbNodeConnectionType.SUCCESS); } else { TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), wrap("error", ruleEngineDeviceRpcResponse.getError().get().name())); ctx.enqueueForTellFailure(next, ruleEngineDeviceRpcResponse.getError().get().name()); } }); ctx.ack(msg); } }
// Verifies that onMsg builds an RPC request carrying the method, body, device/tenant ids
// and additional info parsed from the incoming message, and hands it to the RPC service.
@Test public void givenMsgBody_whenOnMsg_thenVerifyRequest() { given(ctxMock.getRpcService()).willReturn(rpcServiceMock); given(ctxMock.getTenantId()).willReturn(TENANT_ID); TbMsg msg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, TbMsgMetaData.EMPTY, MSG_DATA); node.onMsg(ctxMock, msg); ArgumentCaptor<RuleEngineDeviceRpcRequest> requestCaptor = ArgumentCaptor.forClass(RuleEngineDeviceRpcRequest.class); then(rpcServiceMock).should().sendRpcRequestToDevice(requestCaptor.capture(), any(Consumer.class)); assertThat(requestCaptor.getValue()) .hasFieldOrPropertyWithValue("method", "setGpio") .hasFieldOrPropertyWithValue("body", "{\"pin\":\"23\",\"value\":1}") .hasFieldOrPropertyWithValue("deviceId", DEVICE_ID) .hasFieldOrPropertyWithValue("tenantId", TENANT_ID) .hasFieldOrPropertyWithValue("additionalInfo", "information"); }
/**
 * Creates an {@link ApiClient} backed by a web client configured for the given base URL,
 * optional bearer token, and truststore location.
 *
 * @throws MalformedURLException if the base URL is invalid
 * @throws SSLException if the SSL context cannot be built from the truststore
 */
public ApiClient createApiClient(@NonNull String baseUrl, String token, String truststoreLocation)
    throws MalformedURLException, SSLException {
  WebClient webClient = createWebClient(baseUrl, token, truststoreLocation);
  ApiClient client = new ApiClient(webClient);
  // Only attach the Authorization header when a non-empty token was supplied.
  boolean hasToken = token != null && !token.isEmpty();
  if (hasToken) {
    client.addDefaultHeader(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", token));
  }
  client.setBasePath(baseUrl);
  return client;
}
// Verifies that creating a client for an https URL builds an SSL context from the
// provided truststore location exactly once.
@Test public void testWebClientForHttpsUrl() throws Exception { ArgumentCaptor<String> truststoreLocationCapture = ArgumentCaptor.forClass(String.class); ApiClient apiClient = tablesApiClientFactorySpy.createApiClient( "https://test.openhouse.com", "", tmpCert.getAbsolutePath()); Mockito.verify(tablesApiClientFactorySpy, Mockito.times(1)) .createSslContext(truststoreLocationCapture.capture()); assertNotNull(apiClient); assertEquals(tmpCert.getAbsolutePath(), truststoreLocationCapture.getValue()); }
/**
 * Returns whether the product {@code a * b} reaches the 250,000 limit.
 *
 * <p>Fix: the previous implementation called {@link Math#multiplyExact(long, long)}
 * unguarded, so a product overflowing {@code long} threw {@link ArithmeticException}
 * instead of answering the question. On overflow the true product's magnitude is
 * larger than {@code Long.MAX_VALUE}, so the answer is decided by its sign alone:
 * operands of the same sign yield a huge positive product (over the limit), while
 * differing signs yield a huge negative one (under it).
 */
static boolean isOverLimit(long a, long b) {
    try {
        return Math.multiplyExact(a, b) >= 250_000;
    } catch (ArithmeticException overflow) {
        // |a * b| > Long.MAX_VALUE: same-sign operands => enormous positive product.
        return (a < 0) == (b < 0);
    }
}
// Checks the 250,000-product threshold from both sides, including operands whose
// product exceeds Integer.MAX_VALUE (still fits in a long).
@Test public void isOverLimit() { assertThat(BlockRecognizer.isOverLimit(20, 40)).isFalse(); assertThat(BlockRecognizer.isOverLimit(3, 100_000)).isTrue(); // multiplication of these two ints is higher than Integer.MAX_VALUE assertThat(BlockRecognizer.isOverLimit(50_000, 60_000)).isTrue(); }
/**
 * Applies the metric changes for a topic deletion: every partition of the topic
 * transitions to "absent" (null next state) and the global topic count drops by one.
 */
void handleDeletedTopic(TopicImage deletedTopic) {
    for (PartitionRegistration partition : deletedTopic.partitions().values()) {
        handlePartitionChange(partition, null);
    }
    globalTopicsChange--;
}
// Deleting a 5-partition topic (2 normal, 2 non-preferred-leader, 1 offline) must decrement
// topic/partition/offline counters; the offline partition also counts against
// partitions-without-preferred-leader, hence -3 rather than -2.
@Test public void testHandleDeletedTopic() { ControllerMetricsChanges changes = new ControllerMetricsChanges(); Map<Integer, PartitionRegistration> partitions = new HashMap<>(); partitions.put(0, fakePartitionRegistration(NORMAL)); partitions.put(1, fakePartitionRegistration(NORMAL)); partitions.put(2, fakePartitionRegistration(NON_PREFERRED_LEADER)); partitions.put(3, fakePartitionRegistration(NON_PREFERRED_LEADER)); partitions.put(4, fakePartitionRegistration(OFFLINE)); TopicImage topicImage = new TopicImage("foo", Uuid.fromString("wXtW6pQbTS2CL6PjdRCqVw"), partitions); changes.handleDeletedTopic(topicImage); assertEquals(-1, changes.globalTopicsChange()); assertEquals(-5, changes.globalPartitionsChange()); assertEquals(-1, changes.offlinePartitionsChange()); // The offline partition counts as a partition without its preferred leader. assertEquals(-3, changes.partitionsWithoutPreferredLeaderChange()); }
/**
 * Serializes a BigInteger into a fixed-length, left-zero-padded big-endian byte array.
 *
 * @param value  the value to serialize
 * @param length the exact size of the returned array
 * @return the value's magnitude right-aligned in a {@code length}-byte array
 * @throws RuntimeException if the magnitude does not fit in {@code length} bytes
 */
public static byte[] toBytesPadded(BigInteger value, int length) {
    byte[] raw = value.toByteArray();
    // toByteArray() may prepend a single zero sign byte for non-negative values;
    // skip it so only the magnitude is copied.
    int srcOffset = (raw[0] == 0) ? 1 : 0;
    int magnitudeLength = raw.length - srcOffset;
    if (magnitudeLength > length) {
        throw new RuntimeException("Input is too large to put in byte array of size " + length);
    }
    byte[] padded = new byte[length];
    // Left-pad with zeros: copy the magnitude into the tail of the result.
    System.arraycopy(raw, srcOffset, padded, length - magnitudeLength, magnitudeLength);
    return padded;
}
// Checks left-zero-padding of small and word-sized BigIntegers into fixed-width arrays.
@Test public void testToBytesPadded() { assertArrayEquals(Numeric.toBytesPadded(BigInteger.TEN, 1), (new byte[] {0xa})); assertArrayEquals( Numeric.toBytesPadded(BigInteger.TEN, 8), (new byte[] {0, 0, 0, 0, 0, 0, 0, 0xa})); assertArrayEquals( Numeric.toBytesPadded(BigInteger.valueOf(Integer.MAX_VALUE), 4), (new byte[] {0x7f, (byte) 0xff, (byte) 0xff, (byte) 0xff})); }
/**
 * Decides whether this checkpoint is positioned at or before the given record.
 * Timestamp checkpoints compare purely on arrival time; otherwise records are ordered
 * by extended sequence number, and on an exact tie the iterator type determines whether
 * the current position itself is included (AT_SEQUENCE_NUMBER) or excluded.
 */
public boolean isBeforeOrAt(KinesisRecord other) {
    if (shardIteratorType == AT_TIMESTAMP) {
        return timestamp.compareTo(other.getApproximateArrivalTimestamp()) <= 0;
    }
    int comparison = extendedSequenceNumber().compareTo(other.getExtendedSequenceNumber());
    if (comparison != 0) {
        return comparison < 0;
    }
    // Equal sequence numbers: inclusive only for AT_SEQUENCE_NUMBER.
    return shardIteratorType == AT_SEQUENCE_NUMBER;
}
// An AT_TIMESTAMP checkpoint is before-or-at records arriving at or after its timestamp,
// and not before-or-at records that arrived earlier.
@Test public void testComparisonWithTimestamp() { DateTime referenceTimestamp = DateTime.now(); assertThat( checkpoint(AT_TIMESTAMP, referenceTimestamp.toInstant()) .isBeforeOrAt(recordWith(referenceTimestamp.minusMillis(10).toInstant()))) .isFalse(); assertThat( checkpoint(AT_TIMESTAMP, referenceTimestamp.toInstant()) .isBeforeOrAt(recordWith(referenceTimestamp.toInstant()))) .isTrue(); assertThat( checkpoint(AT_TIMESTAMP, referenceTimestamp.toInstant()) .isBeforeOrAt(recordWith(referenceTimestamp.plusMillis(10).toInstant()))) .isTrue(); }
// Starts a workflow instance under the given run strategy inside a single retryable
// transaction: allocates the next instance id, short-circuits on duplicate UUIDs
// (returns 0), persists terminal instances directly, and otherwise dispatches on the
// strategy rule (SEQUENTIAL/PARALLEL/STRICT_SEQUENTIAL insert, FIRST_ONLY/LAST_ONLY
// special handling). The latest-instance-id pointer is advanced only when this
// instance actually took the allocated id. Errors are logged via withMetricLogError.
public int startWithRunStrategy( @NotNull WorkflowInstance instance, @NotNull RunStrategy runStrategy) { return withMetricLogError( () -> withRetryableTransaction( conn -> { final long nextInstanceId = getLatestInstanceId(conn, instance.getWorkflowId()) + 1; if (isDuplicated(conn, instance)) { return 0; } completeInstanceInit(conn, nextInstanceId, instance); int res; if (instance.getStatus().isTerminal()) { // Save it directly and send a terminate event res = addTerminatedInstance(conn, instance); } else { switch (runStrategy.getRule()) { case SEQUENTIAL: case PARALLEL: case STRICT_SEQUENTIAL: res = insertInstance(conn, instance, true, null); break; case FIRST_ONLY: res = startFirstOnlyInstance(conn, instance); break; case LAST_ONLY: res = startLastOnlyInstance(conn, instance); break; default: throw new MaestroInternalError( "When start, run strategy [%s] is not supported.", runStrategy); } } if (instance.getWorkflowInstanceId() == nextInstanceId) { updateLatestInstanceId(conn, instance.getWorkflowId(), nextInstanceId); } return res; }), "startWithRunStrategy", "Failed to start a workflow [{}][{}] with run strategy [{}]", instance.getWorkflowId(), instance.getWorkflowUuid(), runStrategy); }
// After terminating a queued instance as FAILED, restarting it (RESTART_FROM_INCOMPLETE)
// via startWithRunStrategy must update the ancestor's raw status to FAILED_2 while the
// aggregated status stays FAILED.
@Test public void testStartWithRunStrategyToUpdateAncestorStatus() { dao.tryTerminateQueuedInstance(wfi, WorkflowInstance.Status.FAILED, "test"); WorkflowInstance.Status status = dao.getWorkflowInstanceStatus(wfi.getWorkflowId(), wfi.getWorkflowInstanceId(), 1L); assertEquals(WorkflowInstance.Status.FAILED, status); String rawStatus = dao.getWorkflowInstanceRawStatus(wfi.getWorkflowId(), wfi.getWorkflowInstanceId(), 1L); assertEquals("FAILED", rawStatus); wfi.setWorkflowRunId(0L); wfi.setWorkflowUuid("test-uuid"); wfi.setRunConfig(new RunConfig()); wfi.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_INCOMPLETE); runStrategyDao.startWithRunStrategy(wfi, Defaults.DEFAULT_RUN_STRATEGY); status = dao.getWorkflowInstanceStatus(wfi.getWorkflowId(), wfi.getWorkflowInstanceId(), 1L); assertEquals(WorkflowInstance.Status.FAILED, status); rawStatus = dao.getWorkflowInstanceRawStatus(wfi.getWorkflowId(), wfi.getWorkflowInstanceId(), 1L); assertEquals("FAILED_2", rawStatus); }
// Delegates to the varargs overload. The (Object[]) cast makes the null an empty
// argument array rather than a single null vararg element.
@Override public void info(String msg) { info(msg, (Object[]) null); }
// Smoke-tests Slf4jLogger and FuryLogger across info/warn/error, with and without
// placeholders, throwables, and a null message; passes as long as nothing throws.
@Test public void testInfo() { Slf4jLogger logger = new Slf4jLogger((Slf4jLoggerTest.class)); FuryLogger furyLogger = new FuryLogger((Slf4jLoggerTest.class)); logger.info("testInfo"); logger.info("testInfo {}", "placeHolder"); logger.warn("testInfo {}", "placeHolder"); logger.error("testInfo {}", "placeHolder", new Exception("test log")); furyLogger.info("testInfo"); furyLogger.info("testInfo {}", "placeHolder"); furyLogger.warn("testInfo {}", "placeHolder"); furyLogger.error("testInfo {}", "placeHolder", new Exception("test log")); furyLogger.error(null, new Exception("test log")); }
@Override public Repository getRepository() { try { // NOTE: this class formerly used a ranking system to prioritize the providers registered and would check them in order // of priority for the first non-null repository. In practice, we only ever registered one at a time, spoon or PUC. // As such, the priority ranking is gone and will need to be reintroduced if desired later. Collection<KettleRepositoryProvider> repositoryProviders = PluginServiceLoader.loadServices( KettleRepositoryProvider.class ); return repositoryProviders.stream().map( KettleRepositoryProvider::getRepository ).filter( Objects::nonNull ).findFirst().orElse( null ); } catch ( KettlePluginException e ) { logger.error( "Error getting repository", e ); } return null; }
// With four providers registered, the locator must return the first provider's repository
// and must not query the remaining providers (short-circuit behavior).
@Test public void testGetRepositoryMultiple() { KettleRepositoryProvider provider1 = mock( KettleRepositoryProvider.class ); KettleRepositoryProvider provider2 = mock( KettleRepositoryProvider.class ); KettleRepositoryProvider provider3 = mock( KettleRepositoryProvider.class ); KettleRepositoryProvider provider4 = mock( KettleRepositoryProvider.class ); Collection<KettleRepositoryProvider> providerCollection = new ArrayList<>(); providerCollection.add( provider1 ); providerCollection.add( provider2 ); providerCollection.add( provider3 ); providerCollection.add( provider4 ); Repository repository = mock( Repository.class ); when( repository.getName() ).thenReturn( "repo1" ); Repository repository2 = mock( Repository.class ); when( repository2.getName() ).thenReturn( "repo2" ); when( provider1.getRepository() ).thenReturn( repository ); when( provider2.getRepository() ).thenReturn( repository2 ); Repository repository3 = mock( Repository.class ); when( repository3.getName() ).thenReturn( "repo3" ); when( provider3.getRepository() ).thenReturn( repository3 ); Repository repository4 = mock( Repository.class ); when( repository4.getName() ).thenReturn( "repo4" ); when( provider4.getRepository() ).thenReturn( repository4 ); // this test is a bit ugly and fairly dependent on the implementation of the collection being used here and // the Java streams implementation try ( MockedStatic<PluginServiceLoader> pluginServiceLoaderMockedStatic = Mockito.mockStatic( PluginServiceLoader.class ) ) { pluginServiceLoaderMockedStatic.when( () -> PluginServiceLoader.loadServices( any() ) ).thenReturn( providerCollection ); Repository repoReturned = kettleRepositoryLocator.getRepository(); assertEquals( "Expected repo1 got " + repoReturned.getName(), repository, repoReturned ); verify( provider1 ).getRepository(); verify( provider2, never() ).getRepository(); verify( provider3, never() ).getRepository(); verify( provider4, never() ).getRepository(); } }
// Returns the aggregation function/column pairs configured for this star-tree.
// NOTE(review): keySet() is a live view of the internal map, not a defensive copy;
// callers should treat it as read-only.
public Set<AggregationFunctionColumnPair> getFunctionColumnPairs() { return _aggregationSpecs.keySet(); }
// Round-trips function/column pairs through metadata properties and verifies the
// deserialized set matches what was written.
@Test public void testUniqueFunctionColumnPairs() { Set<AggregationFunctionColumnPair> expected = new HashSet<>(); expected.add(AggregationFunctionColumnPair.fromColumnName("count__*")); expected.add(AggregationFunctionColumnPair.fromColumnName("sum__dimX")); Configuration metadataProperties = createMetadata(List.of("dimX"), expected); StarTreeV2Metadata starTreeV2Metadata = new StarTreeV2Metadata(metadataProperties); Set<AggregationFunctionColumnPair> actual = starTreeV2Metadata.getFunctionColumnPairs(); assertEquals(expected, actual); }
/**
 * Evaluates whether data written with {@code writer} can be decoded with {@code reader}
 * and wraps the verdict, both schemas, and a human-readable message in a
 * {@link SchemaPairCompatibility}.
 *
 * @throws AvroRuntimeException if the checker reports an unknown compatibility value
 */
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
  final SchemaCompatibilityResult compatibility =
      new ReaderWriterCompatibilityChecker().getCompatibility(reader, writer);
  final String message;
  switch (compatibility.getCompatibility()) {
  case INCOMPATIBLE:
    message = String.format(
        "Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
        writer.toString(true), reader.toString(true));
    break;
  case COMPATIBLE:
    message = READER_WRITER_COMPATIBLE_MESSAGE;
    break;
  default:
    throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
  }
  return new SchemaPairCompatibility(compatibility, reader, writer, message);
}
// An optional (nullable-union) writer field read as a mandatory int must be reported
// incompatible, with the NULL branch flagged at location /fields/0/type/0.
@Test void unionWriterSimpleReaderIncompatibility() { Schema mandatorySchema = SchemaBuilder.record("Account").fields().name("age").type().intType().noDefault() .endRecord(); Schema optionalSchema = SchemaBuilder.record("Account").fields().optionalInt("age").endRecord(); SchemaPairCompatibility compatibility = checkReaderWriterCompatibility(mandatorySchema, optionalSchema); assertEquals(SchemaCompatibilityType.INCOMPATIBLE, compatibility.getType()); Incompatibility incompatibility = compatibility.getResult().getIncompatibilities().get(0); assertEquals("reader type: INT not compatible with writer type: NULL", incompatibility.getMessage()); assertEquals("/fields/0/type/0", incompatibility.getLocation()); }
// Builds command packet(s) from the payload. Simple-protocol packets skip one byte
// before parsing (presumably already-accounted framing; confirm against
// PostgreSQLPacketPayload's read position). Extended-protocol payloads may contain
// several pipelined messages: each is sliced out of the buffer by its declared length
// and the slices are wrapped in a single aggregated packet.
public static PostgreSQLCommandPacket newInstance(final PostgreSQLCommandPacketType commandPacketType, final PostgreSQLPacketPayload payload) { if (!PostgreSQLCommandPacketType.isExtendedProtocolPacketType(commandPacketType)) { payload.getByteBuf().skipBytes(1); return getPostgreSQLCommandPacket(commandPacketType, payload); } List<PostgreSQLCommandPacket> result = new ArrayList<>(); while (payload.hasCompletePacket()) { PostgreSQLCommandPacketType type = PostgreSQLCommandPacketType.valueOf(payload.readInt1()); int length = payload.getByteBuf().getInt(payload.getByteBuf().readerIndex()); PostgreSQLPacketPayload slicedPayload = new PostgreSQLPacketPayload(payload.getByteBuf().readSlice(length), payload.getCharset()); result.add(getPostgreSQLCommandPacket(type, slicedPayload)); } return new PostgreSQLAggregatedCommandPacket(result); }
// PARSE is an extended-protocol command, so the factory must return an aggregated packet.
@Test void assertNewInstanceWithParseComPacket() { assertThat(PostgreSQLCommandPacketFactory.newInstance(PostgreSQLCommandPacketType.PARSE_COMMAND, payload), instanceOf(PostgreSQLAggregatedCommandPacket.class)); }
// Unassigns the given partitions by delegating to updateAssignments with an empty
// "add" set; the partitions argument must be non-null.
public void removeAssignmentsForPartitions(final Set<TopicIdPartition> partitions) { updateAssignments(Collections.emptySet(), Objects.requireNonNull(partitions)); }
// Assigns three partitions, ingests a record for one, then removes that partition and
// verifies it is unassigned and its handler state cleared while the others stay assigned.
@Test public void testRemoveAssignmentsForPartitions() { final List<TopicIdPartition> allPartitions = getIdPartitions("sample", 3); final Map<TopicPartition, Long> endOffsets = allPartitions.stream() .map(idp -> toRemoteLogPartition(partitioner.metadataPartition(idp))) .collect(Collectors.toMap(Function.identity(), e -> 0L, (a, b) -> b)); consumer.updateEndOffsets(endOffsets); consumerTask.addAssignmentsForPartitions(new HashSet<>(allPartitions)); consumerTask.ingestRecords(); final TopicIdPartition tpId = allPartitions.get(0); assertTrue(consumerTask.isUserPartitionAssigned(tpId), "Partition " + tpId + " has not been assigned"); addRecord(consumer, partitioner.metadataPartition(tpId), tpId, 0); consumerTask.ingestRecords(); assertTrue(consumerTask.readOffsetForMetadataPartition(partitioner.metadataPartition(tpId)).isPresent()); final Set<TopicIdPartition> removePartitions = Collections.singleton(tpId); consumerTask.removeAssignmentsForPartitions(removePartitions); consumerTask.ingestRecords(); for (final TopicIdPartition idPartition : allPartitions) { assertEquals(!removePartitions.contains(idPartition), consumerTask.isUserPartitionAssigned(idPartition), "Partition " + idPartition + " has not been removed"); } for (TopicIdPartition removePartition : removePartitions) { assertTrue(handler.isPartitionCleared.containsKey(removePartition), "Partition " + removePartition + " has not been cleared"); } }
// Supplier implementation: resolves and returns the detected Opensearch distribution.
@Override public OpensearchDistribution get() { return distribution.get(); }
// A distribution directory without an architecture suffix should still resolve the
// version, with architecture and platform left null.
@Test void testDetectionWithoutArch() { final OpensearchDistribution dist = provider(tempDirWithoutArch, OpensearchArchitecture.x64).get(); Assertions.assertThat(dist.version()).isEqualTo("2.4.1"); Assertions.assertThat(dist.architecture()).isNull(); Assertions.assertThat(dist.platform()).isNull(); }
// Great-circle distance between this point and other via the haversine formula,
// scaled by RADIUS_EARTH_METERS (so the result is in metres). Math.min(1, ...) clamps
// the asin argument against floating-point rounding slightly above 1.
public double distanceToAsDouble(final IGeoPoint other) { final double lat1 = DEG2RAD * getLatitude(); final double lat2 = DEG2RAD * other.getLatitude(); final double lon1 = DEG2RAD * getLongitude(); final double lon2 = DEG2RAD * other.getLongitude(); return RADIUS_EARTH_METERS * 2 * Math.asin(Math.min(1, Math.sqrt( Math.pow(Math.sin((lat2 - lat1) / 2), 2) + Math.cos(lat1) * Math.cos(lat2) * Math.pow(Math.sin((lon2 - lon1) / 2), 2) ))); }
// For 100 random points, the distance from a point to an equal copy of itself must be
// exactly zero in both directions.
@Test public void test_distanceTo_itself() { final double distancePrecisionDelta = 0; final int iterations = 100; for (int i = 0; i < iterations; i++) { final GeoPoint target = new GeoPoint(getRandomLatitude(), getRandomLongitude()); final GeoPoint other = new GeoPoint(target); assertEquals("distance to self is zero for " + target, 0, target.distanceToAsDouble(other), distancePrecisionDelta); assertEquals("reverse distance to self is zero for " + other, 0, other.distanceToAsDouble(target), distancePrecisionDelta); } }
// Executes the Redis ECHO command through the read path with a raw byte-array codec,
// returning the message unchanged.
@Override public byte[] echo(byte[] message) { return read(null, ByteArrayCodec.INSTANCE, ECHO, message); }
// ECHO must return exactly the bytes that were sent.
@Test public void testEcho() { assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes()); }
// ABI-encodes a single parameter by dispatching on its runtime type. Order matters:
// static arrays of dynamic structs need offset-based encoding, primitives are first
// converted to their Solidity type, and any unrecognized type is rejected.
@SuppressWarnings("unchecked") public static String encode(Type parameter) { if (parameter instanceof NumericType) { return encodeNumeric(((NumericType) parameter)); } else if (parameter instanceof Address) { return encodeAddress((Address) parameter); } else if (parameter instanceof Bool) { return encodeBool((Bool) parameter); } else if (parameter instanceof Bytes) { return encodeBytes((Bytes) parameter); } else if (parameter instanceof DynamicBytes) { return encodeDynamicBytes((DynamicBytes) parameter); } else if (parameter instanceof Utf8String) { return encodeString((Utf8String) parameter); } else if (parameter instanceof StaticArray) { if (DynamicStruct.class.isAssignableFrom( ((StaticArray) parameter).getComponentType())) { return encodeStaticArrayWithDynamicStruct((StaticArray) parameter); } else { return encodeArrayValues((StaticArray) parameter); } } else if (parameter instanceof DynamicStruct) { return encodeDynamicStruct((DynamicStruct) parameter); } else if (parameter instanceof DynamicArray) { return encodeDynamicArray((DynamicArray) parameter); } else if (parameter instanceof PrimitiveType) { return encode(((PrimitiveType) parameter).toSolidityType()); } else { throw new UnsupportedOperationException( "Type cannot be encoded: " + parameter.getClass()); } }
// Verifies the ABI encoding (offsets + length-prefixed data words) of a struct
// containing dynamic bytes and a string against a precomputed hex fixture.
@Test public void testStructContainingDynamicBytes() { String expectedEncoding = "0000000000000000000000000000000000000000000000000000000000000060" + "0000000000000000000000000000000000000000000000000000000000000000" + "00000000000000000000000000000000000000000000000000000000000000a0" + "0000000000000000000000000000000000000000000000000000000000000007" + "64796e616d696300000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000005" + "4279746573000000000000000000000000000000000000000000000000000000"; assertEquals( expectedEncoding, encode(AbiV2TestFixture.addDynamicBytesArrayFunction.getInputParameters().get(0))); }
// Re-opens every subtask gateway so events blocked by an in-flight checkpoint are
// released when that checkpoint is aborted.
@Override public void abortCurrentTriggering() { // unfortunately, this method does not run in the scheduler executor, but in the // checkpoint coordinator time thread. // we can remove the delegation once the checkpoint coordinator runs fully in the // scheduler's main thread executor mainThreadExecutor.execute( () -> subtaskGatewayMap .values() .forEach( SubtaskGatewayImpl ::openGatewayAndUnmarkLastCheckpointIfAny)); }
// An event sent while a checkpoint is in flight must be delivered to the subtask once
// the checkpoint triggering is aborted.
@Test void abortedCheckpointReleasesBlockedEvents() throws Exception { final EventReceivingTasks tasks = EventReceivingTasks.createForRunningTasks(); final OperatorCoordinatorHolder holder = createCoordinatorHolder(tasks, TestingOperatorCoordinator::new); triggerAndCompleteCheckpoint(holder, 123L); getCoordinator(holder).getSubtaskGateway(0).sendEvent(new TestOperatorEvent(1337)); holder.abortCurrentTriggering(); assertThat(tasks.getSentEventsForSubtask(0)).containsExactly(new TestOperatorEvent(1337)); }
// Matches a template expression containing {name} placeholders against a concrete
// string and returns a name -> captured-value map, or an empty map on any mismatch
// (nested/unbalanced braces, literal-text mismatch, missing terminator).
// Implementation is a small state machine (TEXT / VAR / END_VAR) over the expression;
// the missing break after "state = TEXT" in the END_VAR branch is a deliberate
// fall-through into the TEXT case so the character terminating a variable is also
// checked as literal text.
static Map<String, String> resolveVariables(String expression, String str) { if (expression == null || str == null) return Collections.emptyMap(); Map<String, String> resolvedVariables = new HashMap<>(); StringBuilder variableBuilder = new StringBuilder(); State state = State.TEXT; int j = 0; int expressionLength = expression.length(); for (int i = 0; i < expressionLength; i++) { char e = expression.charAt(i); switch (e) { case '{': if (state == END_VAR) return Collections.emptyMap(); state = VAR; break; case '}': if (state != VAR) return Collections.emptyMap(); state = END_VAR; if (i != expressionLength - 1) break; default: switch (state) { case VAR: variableBuilder.append(e); break; case END_VAR: String replacement; boolean ec = i == expressionLength - 1; if (ec) { replacement = str.substring(j); } else { int k = str.indexOf(e, j); if (k == -1) return Collections.emptyMap(); replacement = str.substring(j, str.indexOf(e, j)); } resolvedVariables.put(variableBuilder.toString(), replacement); j += replacement.length(); if (j == str.length() && ec) return resolvedVariables; variableBuilder.setLength(0); state = TEXT; case TEXT: if (str.charAt(j) != e) return Collections.emptyMap(); j++; } } } return resolvedVariables; }
// A lone {id} placeholder must capture the entire input string.
@Test public void testSingleVariable() { Map<String, String> res = resolveVariables("{id}", "5435"); assertEquals(1, res.size()); assertEquals(res.get("id"), "5435"); }
// Convenience overload: sanitizes the statement with lambda processing enabled (true).
public static Statement sanitize( final Statement node, final MetaStore metaStore) { return sanitize(node, metaStore, true); }
// A join column name present in both sources without a qualifier must be rejected as
// ambiguous, naming both candidate columns in the error message.
@Test public void shouldThrowOnAmbiguousQualifierForJoinColumnReference() { // Given: final Statement stmt = givenQuery( "SELECT COL0 FROM TEST1 JOIN TEST2 ON TEST1.COL0=TEST2.COL0;"); // When: final Exception e = assertThrows( KsqlException.class, () -> AstSanitizer.sanitize(stmt, META_STORE) ); // Then: assertThat(e.getMessage(), containsString( "Column 'COL0' is ambiguous. Could be TEST1.COL0 or TEST2.COL0.")); }
/**
 * Returns true when any escaped matcher keyword occurs in the comment, either as a
 * whole word (\b...\b) or embedded within a larger word (\B...\B).
 */
public boolean matches(String comment) {
    for (String escapedMatcher : escapeMatchers()) {
        // Equivalent to the String.join construction this replaces:
        // "\B" + m + "\B|\b" + m + "\b"
        String regex = "\\B" + escapedMatcher + "\\B|\\b" + escapedMatcher + "\\b";
        if (Pattern.compile(regex).matcher(comment).find()) {
            return true;
        }
    }
    return false;
}
// A matcher built from the empty string must match nothing.
@Test void shouldNotMatchAnyThing() throws Exception { assertThat(new Matcher("").matches("ja")).isFalse(); }
// Typed lookup of a plugin setting. When the raw value's class is not directly
// assignable to the spec's type, a fixed set of coercions is attempted:
// Long -> Double, String -> Boolean/URI/Password, String or Ruby codec object -> Codec
// (via the plugin factory). Any other mismatch throws IllegalStateException.
// When the key is absent, Codec/URI/Password specs are built from the spec's raw
// default; everything else falls back to configSpec.defaultValue().
@Override @SuppressWarnings("unchecked") public <T> T get(final PluginConfigSpec<T> configSpec) { if (rawSettings.containsKey(configSpec.name())) { Object o = rawSettings.get(configSpec.name()); if (configSpec.type().isAssignableFrom(o.getClass())) { return (T) o; } else if (configSpec.type() == Double.class && o.getClass() == Long.class) { return configSpec.type().cast(((Long)o).doubleValue()); } else if (configSpec.type() == Boolean.class && o instanceof String) { return configSpec.type().cast(Boolean.parseBoolean((String) o)); } else if (configSpec.type() == Codec.class && o instanceof String && pluginFactory != null) { Codec codec = pluginFactory.buildDefaultCodec((String) o); return configSpec.type().cast(codec); } else if (configSpec.type() == Codec.class && o instanceof RubyObject && RubyCodecDelegator.isRubyCodecSubclass((RubyObject) o)) { Codec codec = pluginFactory.buildRubyCodecWrapper((RubyObject) o); return configSpec.type().cast(codec); } else if (configSpec.type() == URI.class && o instanceof String) { try { URI uri = new URI((String) o); return configSpec.type().cast(uri); } catch (URISyntaxException ex) { throw new IllegalStateException( String.format("Invalid URI specified for '%s'", configSpec.name())); } } else if (configSpec.type() == Password.class && o instanceof String) { Password p = new Password((String) o); return configSpec.type().cast(p); } else { throw new IllegalStateException( String.format("Setting value for '%s' of type '%s' incompatible with defined type of '%s'", configSpec.name(), o.getClass(), configSpec.type())); } } else if (configSpec.type() == Codec.class && configSpec.getRawDefaultValue() != null && pluginFactory != null) { Codec codec = pluginFactory.buildDefaultCodec(configSpec.getRawDefaultValue()); return configSpec.type().cast(codec); } else if (configSpec.type() == URI.class && configSpec.getRawDefaultValue() != null) { try { URI uri = new URI(configSpec.getRawDefaultValue()); return configSpec.type().cast(uri); } 
catch (URISyntaxException ex) { throw new IllegalStateException( String.format("Invalid default URI specified for '%s'", configSpec.name())); } } else if (configSpec.type() == Password.class && configSpec.getRawDefaultValue() != null) { Password p = new Password(configSpec.getRawDefaultValue()); return configSpec.type().cast(p); } else { return configSpec.defaultValue(); } }
// A Long raw value must be coerced to Double when the spec declares a float setting.
@Test public void testDowncastFromLongToDouble() { long defaultValue = 1L; PluginConfigSpec<Double> doubleConfig = PluginConfigSpec.floatSetting(numberKey, defaultValue, false, false); Configuration config = new ConfigurationImpl(Collections.singletonMap(numberKey, defaultValue)); double x = config.get(doubleConfig); Assert.assertEquals(defaultValue, x, 0.001); }
// Decodes \ uXXXX escapes (case-insensitive "\u", per indexOfIgnoreCase) in the input
// into their characters. Malformed escapes — non-hex digits or fewer than four
// characters after "\u" — are left in the output verbatim rather than raising.
public static String toString(String unicode) { if (StrUtil.isBlank(unicode)) { return unicode; } final int len = unicode.length(); StringBuilder sb = new StringBuilder(len); int i; int pos = 0; while ((i = StrUtil.indexOfIgnoreCase(unicode, "\\u", pos)) != -1) { sb.append(unicode, pos, i);//写入Unicode符之前的部分 pos = i; if (i + 5 < len) { char c; try { c = (char) Integer.parseInt(unicode.substring(i + 2, i + 6), 16); sb.append(c); pos = i + 6;//跳过整个Unicode符 } catch (NumberFormatException e) { //非法Unicode符,跳过 sb.append(unicode, pos, i + 2);//写入"\\u" pos = i + 2; } } else { //非Unicode符,结束 break; } } if (pos < len) { sb.append(unicode, pos, len); } return sb.toString(); }
// Mixed input: valid escapes (\U4e2d, \u6587, \u0026) are decoded; malformed ones
// (\u111 too short, \urtyu non-hex) pass through unchanged.
@Test public void convertTest4() { String str = "aaa\\U4e2d\\u6587\\u111\\urtyu\\u0026"; String res = UnicodeUtil.toString(str); assertEquals("aaa中文\\u111\\urtyu&", res); }
// Builds a KsqlGenericRecord from target columns and value expressions:
// defaults to the schema's implicit columns when none are named, requires one
// expression per column, validates each column exists (pseudo-columns included)
// and is insertable, resolves/coerces values, enforces that tables provide all
// primary-key values, and uses ROWTIME if supplied else the injected clock.
public KsqlGenericRecord build( final List<ColumnName> columnNames, final List<Expression> expressions, final LogicalSchema schema, final DataSourceType dataSourceType ) { final List<ColumnName> columns = columnNames.isEmpty() ? implicitColumns(schema) : columnNames; if (columns.size() != expressions.size()) { throw new KsqlException( "Expected a value for each column." + " Expected Columns: " + columnNames + ". Got " + expressions); } final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema); for (ColumnName col : columns) { if (!schemaWithPseudoColumns.findColumn(col).isPresent()) { throw new KsqlException("Column name " + col + " does not exist."); } if (SystemColumns.isDisallowedForInsertValues(col)) { throw new KsqlException("Inserting into column " + col + " is not allowed."); } } final Map<ColumnName, Object> values = resolveValues( columns, expressions, schemaWithPseudoColumns, functionRegistry, config ); if (dataSourceType == DataSourceType.KTABLE) { final String noValue = schemaWithPseudoColumns.key().stream() .map(Column::name) .filter(colName -> !values.containsKey(colName)) .map(ColumnName::text) .collect(Collectors.joining(", ")); if (!noValue.isEmpty()) { throw new KsqlException("Value for primary key column(s) " + noValue + " is required for tables"); } } final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong()); final GenericKey key = buildKey(schema, values); final GenericRow value = buildValue(schema, values); return KsqlGenericRecord.of(key, value, ts); }
// A string literal that cannot be coerced to the INT value column must fail
// the build with an "Expected type" error.
@Test
public void shouldThrowOnTypeMismatchCannotCoerce() {
    // Given:
    final LogicalSchema schema = LogicalSchema.builder()
        .keyColumn(KEY, SqlTypes.STRING)
        .valueColumn(COL0, SqlTypes.INTEGER)
        .build();
    final List<ColumnName> names = ImmutableList.of(KEY, COL0);
    final Expression exp = new StringLiteral("a");

    // When:
    final KsqlException e = assertThrows(KsqlException.class,
        () -> recordFactory.build(
            names,
            ImmutableList.of(exp, exp),
            schema,
            DataSourceType.KSTREAM
        ));

    // Then:
    assertThat(e.getMessage(), containsString("Expected type"));
}
/**
 * Deprecated value-only overload: adapts the supplied transformer to the
 * key-aware form and delegates to the common flat-transform path with no
 * explicit name.
 *
 * @throws NullPointerException if {@code valueTransformerSupplier} is null
 */
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
                                               final String... stateStoreNames) {
    Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
    return doFlatTransformValues(
        toValueTransformerWithKeySupplier(valueTransformerSupplier),
        NamedInternal.empty(),
        stateStoreNames);
}
// A null Named argument must be rejected eagerly with a descriptive
// NullPointerException, before any topology is built.
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueWithKeySupplierAndStore() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.flatTransformValues(
            flatValueTransformerWithKeySupplier,
            (Named) null,
            "storeName"));
    assertThat(exception.getMessage(), equalTo("named can't be null"));
}
/**
 * Decides whether this blocking read must wait for more ringbuffer items.
 *
 * <p>Lazily creates the result set and read cursor on the first call. Returns
 * {@code true} (keep waiting) until either {@code minSize} items have been
 * accumulated or — when {@code minSize == 0} — immediately after an
 * opportunistic read. Note: reading items is a side effect of this check.
 */
@Override
public boolean shouldWait() {
    RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
    if (resultSet == null) {
        // First invocation: set up the result set and position the cursor.
        resultSet = new ReadResultSetImpl<>(minSize, maxSize, getNodeEngine().getSerializationService(), filter);
        sequence = startSequence;
    }
    if (ringbuffer == null) {
        // No ringbuffer yet: only wait if the caller demanded a minimum.
        return minSize > 0;
    }
    sequence = ringbuffer.clampReadSequenceToBounds(sequence);
    if (minSize == 0) {
        // Non-blocking read: grab whatever is available and finish.
        if (sequence < ringbuffer.tailSequence() + 1) {
            readMany(ringbuffer);
        }
        return false;
    }
    if (resultSet.isMinSizeReached()) {
        // enough items have been read, we are done.
        return false;
    }
    if (sequence == ringbuffer.tailSequence() + 1) {
        // the sequence is not readable
        return true;
    }
    readMany(ringbuffer);
    return !resultSet.isMinSizeReached();
}
// Drives shouldWait() while interleaving filtered-out ("bad*") and matching
// ("good*") items: the operation keeps waiting until minSize (3) matching
// items are accumulated, while the read cursor and readCount advance for
// every item — matching or not.
@Test
public void whenFilterProvidedAndNoItemsAvailable() {
    long startSequence = ringbuffer.tailSequence() + 1;
    IFunction<String, Boolean> filter = input -> input.startsWith("good");
    ReadManyOperation op = getReadManyOperation(startSequence, 3, 3, filter);

    assertTrue(op.shouldWait());
    ReadResultSetImpl response = getReadResultSet(op);
    assertEquals(startSequence, op.sequence);
    assertTrue(getReadResultSet(op).isEmpty());

    ringbuffer.add("bad1");
    assertTrue(op.shouldWait());
    assertEquals(startSequence + 1, op.sequence);
    assertEquals(1, response.readCount());
    assertEquals(1, response.getNextSequenceToReadFrom());
    assertEquals(0, response.size());

    ringbuffer.add("good1");
    assertTrue(op.shouldWait());
    assertEquals(startSequence + 2, op.sequence);
    assertEquals(asList("good1"), response);
    assertEquals(2, response.readCount());
    assertEquals(2, response.getNextSequenceToReadFrom());

    ringbuffer.add("bad2");
    assertTrue(op.shouldWait());
    assertEquals(startSequence + 3, op.sequence);
    assertEquals(asList("good1"), response);
    assertEquals(3, response.readCount());
    assertEquals(3, response.getNextSequenceToReadFrom());

    ringbuffer.add("good2");
    assertTrue(op.shouldWait());
    assertEquals(startSequence + 4, op.sequence);
    assertEquals(asList("good1", "good2"), response);
    assertEquals(4, response.readCount());
    assertEquals(4, response.getNextSequenceToReadFrom());

    ringbuffer.add("bad3");
    assertTrue(op.shouldWait());
    assertEquals(startSequence + 5, op.sequence);
    assertEquals(asList("good1", "good2"), response);
    assertEquals(5, response.readCount());
    assertEquals(5, response.getNextSequenceToReadFrom());

    ringbuffer.add("good3");
    assertFalse(op.shouldWait());
    assertEquals(startSequence + 6, op.sequence);
    assertEquals(asList("good1", "good2", "good3"), response);
    assertEquals(6, response.readCount());
    assertEquals(6, response.getNextSequenceToReadFrom());
}
/**
 * Builds the metadata for a transient (interactive, non-persistent) query.
 *
 * <p>Assembles a Streams topology from the physical plan, wires result rows
 * into a TransientQueryQueue, and forces NUM_STANDBY_REPLICAS to 0 since a
 * transient query holds no durable state worth replicating. The result type
 * (stream / table / windowed table) is derived from the built plan.
 */
@SuppressWarnings("ParameterNumber")
TransientQueryMetadata buildTransientQuery(
    final String statementText,
    final QueryId queryId,
    final Set<SourceName> sources,
    final ExecutionStep<?> physicalPlan,
    final String planSummary,
    final LogicalSchema schema,
    final OptionalInt limit,
    final Optional<WindowInfo> windowInfo,
    final boolean excludeTombstones,
    final QueryMetadata.Listener listener,
    final StreamsBuilder streamsBuilder,
    final Optional<ImmutableMap<TopicPartition, Long>> endOffsets,
    final MetricCollectors metricCollectors
) {
    final KsqlConfig ksqlConfig = config.getConfig(true);
    final String applicationId = QueryApplicationId.build(ksqlConfig, false, queryId);
    final RuntimeBuildContext runtimeBuildContext = buildContext(
        applicationId,
        queryId,
        streamsBuilder
    );
    final Map<String, Object> streamsProperties = buildStreamsProperties(
        applicationId,
        Optional.of(queryId),
        metricCollectors,
        config.getConfig(true),
        processingLogContext
    );
    // Transient queries have no state to protect: no standby replicas.
    streamsProperties.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 0);
    final Object buildResult = buildQueryImplementation(physicalPlan, runtimeBuildContext);
    final TransientQueryQueue queue =
        buildTransientQueryQueue(buildResult, limit, excludeTombstones, endOffsets);
    final Topology topology = streamsBuilder.build(PropertiesUtil.asProperties(streamsProperties));
    // Windowed table vs table vs stream, based on what the plan produced.
    final TransientQueryMetadata.ResultType resultType =
        buildResult instanceof KTableHolder
            ? windowInfo.isPresent() ? ResultType.WINDOWED_TABLE : ResultType.TABLE
            : ResultType.STREAM;

    return new TransientQueryMetadata(
        statementText,
        schema,
        sources,
        planSummary,
        queue,
        queryId,
        applicationId,
        topology,
        kafkaStreamsBuilder,
        streamsProperties,
        config.getOverrides(),
        ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG),
        ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_ERROR_MAX_QUEUE_SIZE),
        resultType,
        ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_INITIAL_MS),
        ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_MAX_MS),
        listener,
        processingLogContext.getLoggerFactory()
    );
}
// With shared runtimes enabled, a transient query must still be assembled with
// the expected statement/sources/plan/topology/properties, and the shared-
// runtime internal topic-prefix override must be left unset.
@Test
public void shouldBuildTransientQueryWithSharedRutimesCorrectly() {
    // Given:
    givenTransientQuery();
    when(ksqlConfig.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED)).thenReturn(true);

    // When:
    final TransientQueryMetadata queryMetadata = queryBuilder.buildTransientQuery(
        STATEMENT_TEXT,
        QUERY_ID,
        SOURCES.stream().map(DataSource::getName).collect(Collectors.toSet()),
        physicalPlan,
        SUMMARY,
        TRANSIENT_SINK_SCHEMA,
        LIMIT,
        Optional.empty(),
        false,
        queryListener,
        streamsBuilder,
        Optional.empty(),
        new MetricCollectors()
    );
    queryMetadata.initialize();

    // Then:
    assertThat(queryMetadata.getStatementString(), equalTo(STATEMENT_TEXT));
    assertThat(queryMetadata.getSourceNames(), equalTo(SOURCES.stream()
        .map(DataSource::getName).collect(Collectors.toSet())));
    assertThat(queryMetadata.getExecutionPlan(), equalTo(SUMMARY));
    assertThat(queryMetadata.getTopology(), is(topology));
    assertThat(queryMetadata.getOverriddenProperties(), equalTo(OVERRIDES));
    verify(kafkaStreamsBuilder).build(any(), propertyCaptor.capture());
    assertThat(queryMetadata.getStreamsProperties(), equalTo(propertyCaptor.getValue()));
    assertThat(queryMetadata.getStreamsProperties().get(InternalConfig.TOPIC_PREFIX_ALTERNATIVE),
        nullValue());
}
/**
 * Moves the consumer's position on every given partition by {@code shiftBy}
 * (which may be negative), clamping each resulting offset into the valid
 * [beginning, end] range before seeking.
 */
public void shiftOffsetsBy(final Consumer<byte[], byte[]> client,
                           final Set<TopicPartition> inputTopicPartitions,
                           final long shiftBy) {
    // Snapshot the valid offset range for every partition.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);

    // Compute each partition's requested target: current position + shift.
    final Map<TopicPartition, Long> requestedOffsets = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition partition : inputTopicPartitions) {
        requestedOffsets.put(partition, client.position(partition) + shiftBy);
    }

    // Clamp the requests into range, then apply them.
    final Map<TopicPartition, Long> validatedOffsets =
        checkOffsetRange(requestedOffsets, beginningOffsets, endOffsets);
    for (final TopicPartition partition : inputTopicPartitions) {
        client.seek(partition, validatedOffsets.get(partition));
    }
}
// Shifting by +3 stays within [beginningOffset=0, endOffset=4], so no clamping
// applies and, per this fixture, two records remain to be polled.
@Test
public void testShiftOffsetByWhenBetweenBeginningAndEndOffset() {
    final Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(topicPartition, 4L);
    consumer.updateEndOffsets(endOffsets);
    final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(topicPartition, 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    streamsResetter.shiftOffsetsBy(consumer, inputTopicPartitions, 3L);
    final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
    assertEquals(2, records.count());
}
/**
 * Fetches the value stored under the rendered key from the rendered
 * namespace's KV store.
 *
 * <p>Access from the current flow to the target namespace is checked first.
 * When the key is absent: throws {@link NoSuchElementException} if
 * {@code errorOnMissing} is set, otherwise returns an Output with a null value.
 */
@Override
public Output run(RunContext runContext) throws Exception {
    String renderedNamespace = runContext.render(this.namespace);

    // Cross-namespace access must be explicitly allowed.
    FlowService flowService = ((DefaultRunContext) runContext).getApplicationContext().getBean(FlowService.class);
    flowService.checkAllowedNamespace(runContext.tenantId(), renderedNamespace, runContext.tenantId(), runContext.flowInfo().namespace());

    String renderedKey = runContext.render(this.key);
    Optional<KVValue> maybeValue = runContext.namespaceKv(renderedNamespace).getValue(renderedKey);

    if (this.errorOnMissing && maybeValue.isEmpty()) {
        throw new NoSuchElementException("No value found for key '" + renderedKey + "' in namespace '" + renderedNamespace + "' and `errorOnMissing` is set to true");
    }

    return Output.builder()
        .value(maybeValue.map(KVValue::value).orElse(null))
        .build();
}
// Getting a non-existing key yields a null value by default, but must throw
// NoSuchElementException with a descriptive message when errorOnMissing is set.
@Test
void shouldGetGivenNonExistingKey() throws Exception {
    // Given
    String namespaceId = "io.kestra." + IdUtils.create();
    RunContext runContext = this.runContextFactory.of(Map.of(
        "flow", Map.of("namespace", namespaceId),
        "inputs", Map.of(
            "key", TEST_KV_KEY,
            "namespace", namespaceId
        )
    ));
    Get get = Get.builder()
        .id(Get.class.getSimpleName())
        .type(Get.class.getName())
        .namespace(namespaceId)
        .key("my-key")
        .build();

    // When
    Get.Output run = get.run(runContext);

    // Then
    assertThat(run.getValue(), nullValue());
    NoSuchElementException noSuchElementException = Assertions.assertThrows(NoSuchElementException.class,
        () -> get.toBuilder().errorOnMissing(true).build().run(runContext));
    assertThat(noSuchElementException.getMessage(),
        is("No value found for key 'my-key' in namespace '" + namespaceId + "' and `errorOnMissing` is set to true"));
}
/**
 * Executes a DROP CONNECTOR statement by deleting the connector through the
 * Connect client.
 *
 * <p>A 404 combined with IF EXISTS yields a warning entity instead of an
 * error; any other failure is rethrown as a {@link KsqlRestException} that
 * carries the Connect HTTP status code and error message.
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<DropConnector> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final String connectorName = statement.getStatement().getConnectorName();
    final boolean ifExists = statement.getStatement().getIfExists();
    final ConnectResponse<String> response =
        serviceContext.getConnectClient().delete(connectorName);

    if (response.error().isPresent()) {
        if (ifExists && response.httpCode() == HttpStatus.SC_NOT_FOUND) {
            // IF EXISTS swallows only the "not found" case.
            return StatementExecutorResponse.handled(Optional.of(
                new WarningEntity(statement.getMaskedStatementText(),
                    "Connector '" + connectorName + "' does not exist.")));
        } else {
            final String errorMsg = "Failed to drop connector: " + response.error().get();
            throw new KsqlRestException(EndpointResponse.create()
                .status(response.httpCode())
                .entity(new KsqlErrorMessage(Errors.toErrorCode(response.httpCode()), errorMsg))
                .build()
            );
        }
    }

    return StatementExecutorResponse.handled(Optional.of(
        new DropConnectorEntity(statement.getMaskedStatementText(), connectorName)));
}
@Test public void shouldThrowOnError() { // Given: when(connectClient.delete(anyString())) .thenReturn(ConnectResponse.failure("Danger Mouse!", HttpStatus.SC_INTERNAL_SERVER_ERROR)); // When: final KsqlRestException e = assertThrows( KsqlRestException.class, () -> DropConnectorExecutor.execute( DROP_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext)); final KsqlRestException eIfExists = assertThrows( KsqlRestException.class, () -> DropConnectorExecutor.execute( DROP_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext)); // Then: assertThat(e.getResponse().getStatus(), is(HttpStatus.SC_INTERNAL_SERVER_ERROR)); final KsqlErrorMessage err = (KsqlErrorMessage) e.getResponse().getEntity(); assertThat(err.getErrorCode(), is(Errors.toErrorCode(HttpStatus.SC_INTERNAL_SERVER_ERROR))); assertThat(err.getMessage(), containsString("Failed to drop connector: Danger Mouse!")); assertThat(eIfExists.getResponse().getStatus(), is(HttpStatus.SC_INTERNAL_SERVER_ERROR)); final KsqlErrorMessage errIfExists = (KsqlErrorMessage) e.getResponse().getEntity(); assertThat(errIfExists.getErrorCode(), is(Errors.toErrorCode(HttpStatus.SC_INTERNAL_SERVER_ERROR))); assertThat(errIfExists.getMessage(), containsString("Failed to drop connector: Danger Mouse!")); }
/**
 * Executes an INSERT VALUES statement: resolves the target data source,
 * validates the insert, serializes the row and produces it to the source's
 * Kafka topic.
 *
 * <p>Producer authorization failures are rewrapped so the error message
 * names the missing ACL, consistent with other authorization errors.
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final InsertValues insertValues = statement.getStatement();
    final MetaStore metaStore = executionContext.getMetaStore();
    final KsqlConfig config = statement.getSessionConfig().getConfig(true);

    final DataSource dataSource = getDataSource(config, metaStore, insertValues);
    validateInsert(insertValues.getColumns(), dataSource);

    final ProducerRecord<byte[], byte[]> record =
        buildRecord(statement, metaStore, dataSource, serviceContext);
    try {
        producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
    } catch (final TopicAuthorizationException e) {
        // TopicAuthorizationException does not give much detailed information about why it failed,
        // except which topics are denied. Here we just add the ACL to make the error message
        // consistent with other authorization error messages.
        final Exception rootCause = new KsqlTopicAuthorizationException(
            AclOperation.WRITE,
            e.unauthorizedTopics()
        );
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
    } catch (final ClusterAuthorizationException e) {
        // ClusterAuthorizationException is thrown when using idempotent producers
        // and either a topic write permission or a cluster-level idempotent write
        // permission (only applicable for broker versions no later than 2.8) is
        // missing. In this case, we include additional context to help the user
        // distinguish this type of failure from other permissions exceptions
        // such as the ones thrown above when TopicAuthorizationException is caught.
        throw new KsqlException(
            createInsertFailedExceptionMessage(insertValues),
            createClusterAuthorizationExceptionRootCause(dataSource)
        );
    } catch (final KafkaException e) {
        if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
            // The error message thrown when an idempotent producer is missing permissions
            // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
            // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
            // ksqlDB handles these two the same way, accordingly.
            // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
            throw new KsqlException(
                createInsertFailedExceptionMessage(insertValues),
                createClusterAuthorizationExceptionRootCause(dataSource)
            );
        } else {
            throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
        }
    } catch (final Exception e) {
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
}
// With no explicit column list, the literals are matched against the full
// implicit schema (key column first), producing the expected key and value
// serializations and a single produced record.
@Test
public void shouldFillInFullRowWithNoSchema() {
    // Given:
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        ImmutableList.of(),
        ImmutableList.of(
            new StringLiteral("str"),
            new StringLiteral("str"),
            new LongLiteral(2L)
        )
    );

    // When:
    executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);

    // Then:
    verify(keySerializer).serialize(TOPIC_NAME, genericKey("str"));
    verify(valueSerializer).serialize(TOPIC_NAME, genericRow("str", 2L));
    verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
}
/**
 * Returns whether the argument matches the pattern argument: both name and
 * value must be equal to, or match, the corresponding pattern. Delegates to
 * the Oro-based implementation when Java regex support is disabled.
 */
public static boolean isArgumentMatched(Argument arg, Argument patternArg) {
    if (!USE_JAVA_REGEX) {
        return isArgumentMatchedWithOroRegex(arg, patternArg);
    }
    final boolean nameMatches =
        isEqualOrMatchesWithJavaRegex(arg.getName(), patternArg.getName());
    return nameMatches
        && isEqualOrMatchesWithJavaRegex(arg.getValue(), patternArg.getValue());
}
// Arguments match when names agree and the pattern's value regex matches
// the argument's value; a non-matching regex must fail.
@Test
public void testIsArgumentMatched() throws Exception {
    Argument arg = new Argument();
    Argument argp = new Argument();
    assertTrue(HtmlParsingUtils.isArgumentMatched(arg, argp));

    arg = new Argument("test", "abcd");
    argp = new Argument("test", "a.*d");
    assertTrue(HtmlParsingUtils.isArgumentMatched(arg, argp));

    arg = new Argument("test", "abcd");
    argp = new Argument("test", "a.*e");
    assertFalse(HtmlParsingUtils.isArgumentMatched(arg, argp));
}
/**
 * Collects the job history entries under {@code root} (laid out as
 * year/month/day directories) whose date component is at or before the
 * given cutoff time.
 *
 * <p>Non-numeric directory names at any level are silently skipped. Year,
 * month and day are compared hierarchically so that earlier years/months
 * are always included regardless of their month/day values.
 */
public static List<FileStatus> getHistoryDirsForCleaning(FileContext fc, Path root, long cutoff)
    throws IOException {
    List<FileStatus> fsList = new ArrayList<FileStatus>();
    // Decompose the cutoff into calendar components for comparison.
    Calendar cCal = Calendar.getInstance();
    cCal.setTimeInMillis(cutoff);
    int cYear = cCal.get(Calendar.YEAR);
    int cMonth = cCal.get(Calendar.MONTH) + 1;
    int cDate = cCal.get(Calendar.DATE);

    RemoteIterator<FileStatus> yearDirIt = fc.listStatus(root);
    while (yearDirIt.hasNext()) {
        FileStatus yearDir = yearDirIt.next();
        try {
            int year = Integer.parseInt(yearDir.getPath().getName());
            if (year <= cYear) {
                RemoteIterator<FileStatus> monthDirIt = fc.listStatus(yearDir.getPath());
                while (monthDirIt.hasNext()) {
                    FileStatus monthDir = monthDirIt.next();
                    try {
                        int month = Integer.parseInt(monthDir.getPath().getName());
                        // If we only checked the month here, then something like 07/2013
                        // would incorrectly not pass when the cutoff is 06/2014
                        if (year < cYear || month <= cMonth) {
                            RemoteIterator<FileStatus> dateDirIt = fc.listStatus(monthDir.getPath());
                            while (dateDirIt.hasNext()) {
                                FileStatus dateDir = dateDirIt.next();
                                try {
                                    int date = Integer.parseInt(dateDir.getPath().getName());
                                    // If we only checked the date here, then something like
                                    // 07/21/2013 would incorrectly not pass when the cutoff is
                                    // 08/20/2013 or 07/20/2012
                                    if (year < cYear || month < cMonth || date <= cDate) {
                                        fsList.addAll(remoteIterToList(
                                            fc.listStatus(dateDir.getPath())));
                                    }
                                } catch (NumberFormatException nfe) {
                                    // the directory didn't fit the format we're looking for so
                                    // skip the dir
                                }
                            }
                        }
                    } catch (NumberFormatException nfe) {
                        // the directory didn't fit the format we're looking for so skip
                        // the dir
                    }
                }
            }
        } catch (NumberFormatException nfe) {
            // the directory didn't fit the format we're looking for so skip the dir
        }
    }
    return fsList;
}
// Builds a year/month/day directory tree around the cutoff (2013-07-21) and
// verifies that exactly the 14 directories at or before the cutoff are
// returned in sorted order, while non-numeric path components are skipped.
@Test
@SuppressWarnings("unchecked")
public void testGetHistoryDirsForCleaning() throws IOException {
    Path pRoot = new Path(TEST_DIR, "org.apache.hadoop.mapreduce.v2.jobhistory."
        + "TestJobHistoryUtils.testGetHistoryDirsForCleaning");
    FileContext fc = FileContext.getFileContext();
    // Cutoff date: 2013-07-21 01:00.
    Calendar cCal = Calendar.getInstance();
    int year = 2013;
    int month = 7;
    int day = 21;
    cCal.set(year, month - 1, day, 1, 0);
    long cutoff = cCal.getTimeInMillis();

    clearDir(fc, pRoot);
    // Every combination of {year-1, year, year+1} x {month-1, month, month+1}
    // x {day-1, day, day+1} around the cutoff.
    Path pId00 = createPath(fc, pRoot, year, month, day, "000000");
    Path pId01 = createPath(fc, pRoot, year, month, day + 1, "000001");
    Path pId02 = createPath(fc, pRoot, year, month, day - 1, "000002");
    Path pId03 = createPath(fc, pRoot, year, month + 1, day, "000003");
    Path pId04 = createPath(fc, pRoot, year, month + 1, day + 1, "000004");
    Path pId05 = createPath(fc, pRoot, year, month + 1, day - 1, "000005");
    Path pId06 = createPath(fc, pRoot, year, month - 1, day, "000006");
    Path pId07 = createPath(fc, pRoot, year, month - 1, day + 1, "000007");
    Path pId08 = createPath(fc, pRoot, year, month - 1, day - 1, "000008");
    Path pId09 = createPath(fc, pRoot, year + 1, month, day, "000009");
    Path pId10 = createPath(fc, pRoot, year + 1, month, day + 1, "000010");
    Path pId11 = createPath(fc, pRoot, year + 1, month, day - 1, "000011");
    Path pId12 = createPath(fc, pRoot, year + 1, month + 1, day, "000012");
    Path pId13 = createPath(fc, pRoot, year + 1, month + 1, day + 1, "000013");
    Path pId14 = createPath(fc, pRoot, year + 1, month + 1, day - 1, "000014");
    Path pId15 = createPath(fc, pRoot, year + 1, month - 1, day, "000015");
    Path pId16 = createPath(fc, pRoot, year + 1, month - 1, day + 1, "000016");
    Path pId17 = createPath(fc, pRoot, year + 1, month - 1, day - 1, "000017");
    Path pId18 = createPath(fc, pRoot, year - 1, month, day, "000018");
    Path pId19 = createPath(fc, pRoot, year - 1, month, day + 1, "000019");
    Path pId20 = createPath(fc, pRoot, year - 1, month, day - 1, "000020");
    Path pId21 = createPath(fc, pRoot, year - 1, month + 1, day, "000021");
    Path pId22 = createPath(fc, pRoot, year - 1, month + 1, day + 1, "000022");
    Path pId23 = createPath(fc, pRoot, year - 1, month + 1, day - 1, "000023");
    Path pId24 = createPath(fc, pRoot, year - 1, month - 1, day, "000024");
    Path pId25 = createPath(fc, pRoot, year - 1, month - 1, day + 1, "000025");
    Path pId26 = createPath(fc, pRoot, year - 1, month - 1, day - 1, "000026");
    // non-expected names should be ignored without problems
    Path pId27 = createPath(fc, pRoot, "foo", "" + month, "" + day, "000027");
    Path pId28 = createPath(fc, pRoot, "" + year, "foo", "" + day, "000028");
    Path pId29 = createPath(fc, pRoot, "" + year, "" + month, "foo", "000029");

    List<FileStatus> dirs = JobHistoryUtils
        .getHistoryDirsForCleaning(fc, pRoot, cutoff);
    Collections.sort(dirs);
    assertEquals(14, dirs.size());
    assertEquals(pId26.toUri().getPath(), dirs.get(0).getPath().toUri().getPath());
    assertEquals(pId24.toUri().getPath(), dirs.get(1).getPath().toUri().getPath());
    assertEquals(pId25.toUri().getPath(), dirs.get(2).getPath().toUri().getPath());
    assertEquals(pId20.toUri().getPath(), dirs.get(3).getPath().toUri().getPath());
    assertEquals(pId18.toUri().getPath(), dirs.get(4).getPath().toUri().getPath());
    assertEquals(pId19.toUri().getPath(), dirs.get(5).getPath().toUri().getPath());
    assertEquals(pId23.toUri().getPath(), dirs.get(6).getPath().toUri().getPath());
    assertEquals(pId21.toUri().getPath(), dirs.get(7).getPath().toUri().getPath());
    assertEquals(pId22.toUri().getPath(), dirs.get(8).getPath().toUri().getPath());
    assertEquals(pId08.toUri().getPath(), dirs.get(9).getPath().toUri().getPath());
    assertEquals(pId06.toUri().getPath(), dirs.get(10).getPath().toUri().getPath());
    assertEquals(pId07.toUri().getPath(), dirs.get(11).getPath().toUri().getPath());
    assertEquals(pId02.toUri().getPath(), dirs.get(12).getPath().toUri().getPath());
    assertEquals(pId00.toUri().getPath(), dirs.get(13).getPath().toUri().getPath());
}
/**
 * Exposes the event time (when known) as record metadata under the standard
 * event-time key; returns an empty Option when no event time is present.
 */
@Override
public Option<Map<String, String>> getMetadata() {
    if (!eventTime.isPresent()) {
        return Option.empty();
    }
    Map<String, String> metadata = new HashMap<>();
    metadata.put(METADATA_EVENT_TIME_KEY, String.valueOf(eventTime.get()));
    return Option.of(metadata);
}
// A payload built without an event time must expose no metadata at all.
@Test
public void testGetEmptyMetadata() {
    GenericRecord record = new GenericData.Record(schema);
    record.put("id", "1");
    record.put("partition", "partition0");
    record.put("ts", 0L);
    record.put("_hoodie_is_deleted", false);
    DefaultHoodieRecordPayload payload = new DefaultHoodieRecordPayload(Option.of(record));
    assertFalse(payload.getMetadata().isPresent());
}
/**
 * Delegates to the wrapped serializer's hash code; a missing (null)
 * serializer hashes to 0, keeping the contract with equals.
 */
@Override
public int hashCode() {
    if (serializer == null) {
        return 0;
    }
    return serializer.hashCode();
}
// equals/hashCode must delegate to the wrapped serializer, hash a null
// serializer to 0, and distinguish adapters wrapping different serializers.
@Test
public void testAdaptorEqualAndHashCode() {
    ByteArraySerializerAdapter theOther = new ByteArraySerializerAdapter(serializer);
    ByteArraySerializerAdapter theEmptyOne = new ByteArraySerializerAdapter(null);
    assertEquals(adapter, adapter);
    assertEquals(adapter, theOther);
    assertNotEquals(null, adapter);
    assertNotEquals("Not An Adaptor", adapter);
    assertNotEquals(adapter, theEmptyOne);
    assertEquals(adapter.hashCode(), serializer.hashCode());
    assertEquals(0, theEmptyOne.hashCode());
}
private void add(K key, Set<NodeID> newNodesWithThisKey) { // Step 1 : update existing entries to the updated situation final Iterator<Map.Entry<NodeID, Set<K>>> iter = reverseCacheRepresentation.entrySet().iterator(); while (iter.hasNext()) { final Map.Entry<NodeID, Set<K>> existingEntry = iter.next(); final NodeID existingEntryNodeID = existingEntry.getKey(); if (newNodesWithThisKey.contains(existingEntryNodeID)) { existingEntry.getValue().add(key); } } // Step 2 : add entries for node ids that were not in the reverse lookup before for (final NodeID nodeIdForWhichTheValueExists : newNodesWithThisKey) { if (!reverseCacheRepresentation.containsKey(nodeIdForWhichTheValueExists)) { reverseCacheRepresentation.computeIfAbsent(nodeIdForWhichTheValueExists, k -> new HashSet<>()).add(key); } } }
// entryAdded must populate the reverse lookup: after adding a key that lives
// on one node, that node id must map to a set containing the key.
@Test
public void testAdd() throws Exception {
    // Setup text fixture, Simulating things for a cache with this signature: Cache<String, Set<NodeID>> cache;
    final Map<NodeID, Set<String>> reverseLookupMap = new HashMap<>();
    final Function<Set<NodeID>, Set<NodeID>> deducer = nodeIDS -> nodeIDS;
    final ReverseLookupComputingCacheEntryListener<String, Set<NodeID>> listener =
        new ReverseLookupComputingCacheEntryListener<>(reverseLookupMap, deducer);
    final NodeID clusterNode = NodeID.getInstance(UUID.randomUUID().toString().getBytes());

    // Execute system under test.
    listener.entryAdded("somekey", Collections.singleton(clusterNode), clusterNode);

    // Assert result
    assertTrue(reverseLookupMap.containsKey(clusterNode));
    assertTrue(reverseLookupMap.get(clusterNode).contains("somekey"));
}
/**
 * Reverts a subscribe map whose keys are in "group/interface:version" form
 * back to plain interface-name keys, moving the group and version into the
 * entry's query string (as GROUP_KEY / VERSION_KEY parameters).
 *
 * <p>Keys that do not contain both ':' and '/' are copied through unchanged.
 */
public static Map<String, String> revertSubscribe(Map<String, String> subscribe) {
    Map<String, String> newSubscribe = new HashMap<>();
    for (Map.Entry<String, String> entry : subscribe.entrySet()) {
        String serviceName = entry.getKey();
        String serviceQuery = entry.getValue();
        if (StringUtils.isContains(serviceName, ':') && StringUtils.isContains(serviceName, '/')) {
            Map<String, String> params = StringUtils.parseQueryString(serviceQuery);
            String name = serviceName;
            // Strip the leading "group/" and record it as the group parameter.
            int i = name.indexOf('/');
            if (i >= 0) {
                params.put(GROUP_KEY, name.substring(0, i));
                name = name.substring(i + 1);
            }
            // Strip the trailing ":version" and record it as the version parameter.
            i = name.lastIndexOf(':');
            if (i >= 0) {
                params.put(VERSION_KEY, name.substring(i + 1));
                name = name.substring(0, i);
            }
            newSubscribe.put(name, StringUtils.toQueryString(params));
        } else {
            newSubscribe.put(serviceName, serviceQuery);
        }
    }
    return newSubscribe;
}
// A key without both ':' and '/' must pass through unchanged, even when its
// query string is null.
@Test
void testRevertSubscribe2() {
    String key = "dubbo.test.api.HelloService";
    Map<String, String> subscribe = new HashMap<String, String>();
    subscribe.put(key, null);
    Map<String, String> newSubscribe = UrlUtils.revertSubscribe(subscribe);
    assertEquals(subscribe, newSubscribe);
}
/**
 * Builds TableRecords from a JDBC result set, one Row per result-set row.
 *
 * <p>Column types come from the table metadata; primary key columns (matched
 * case-insensitively) are flagged as such. SQL LOB/complex types (BLOB, CLOB,
 * NCLOB, ARRAY, REF, DATALINK, JAVA_OBJECT) are wrapped into their
 * serializable {@code javax.sql.rowset.serial} counterparts; zoned timestamps
 * are decoded from their raw bytes; everything else goes through
 * {@code holdSerialDataType}.
 */
public static TableRecords buildRecords(TableMeta tmeta, ResultSet resultSet) throws SQLException {
    TableRecords records = new TableRecords(tmeta);
    ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
    Set<String> ignoreCasePKs = tmeta.getCaseInsensitivePKs();
    int columnCount = resultSetMetaData.getColumnCount();
    while (resultSet.next()) {
        List<Field> fields = new ArrayList<>(columnCount);
        for (int i = 1; i <= columnCount; i++) {
            String colName = resultSetMetaData.getColumnName(i);
            ColumnMeta col = getColumnMeta(tmeta,colName);
            int dataType = col.getDataType();
            Field field = new Field();
            field.setName(col.getColumnName());
            if (ignoreCasePKs.contains(colName)) {
                field.setKeyType(KeyType.PRIMARY_KEY);
            }
            field.setType(dataType);
            // mysql will not run in this code
            // cause mysql does not use java.sql.Blob, java.sql.sql.Clob to process Blob and Clob column
            if (dataType == Types.BLOB) {
                Blob blob = resultSet.getBlob(i);
                if (blob != null) {
                    field.setValue(new SerialBlob(blob));
                }
            } else if (dataType == Types.CLOB) {
                Clob clob = resultSet.getClob(i);
                if (clob != null) {
                    field.setValue(new SerialClob(clob));
                }
            } else if (dataType == Types.NCLOB) {
                NClob object = resultSet.getNClob(i);
                if (object != null) {
                    field.setValue(new SerialClob(object));
                }
            } else if (dataType == Types.ARRAY) {
                Array array = resultSet.getArray(i);
                if (array != null) {
                    field.setValue(new SerialArray(array));
                }
            } else if (dataType == Types.REF) {
                Ref ref = resultSet.getRef(i);
                if (ref != null) {
                    field.setValue(new SerialRef(ref));
                }
            } else if (dataType == Types.DATALINK) {
                java.net.URL url = resultSet.getURL(i);
                if (url != null) {
                    field.setValue(new SerialDatalink(url));
                }
            } else if (dataType == Types.JAVA_OBJECT) {
                Object object = resultSet.getObject(i);
                if (object != null) {
                    field.setValue(new SerialJavaObject(object));
                }
            } else if (dataType == TIMESTAMP_WITH_TIME_ZONE || dataType == TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
                // Zoned timestamps are decoded from their raw byte representation.
                field.setValue(convertOffSetTime(timeToOffsetDateTime(resultSet.getBytes(i))));
            } else {
                // JDBCType.DISTINCT, JDBCType.STRUCT etc...
                field.setValue(holdSerialDataType(resultSet.getObject(i)));
            }
            fields.add(field);
        }
        Row row = new Row();
        row.setFields(fields);
        records.add(row);
    }
    return records;
}
// buildRecords must materialize a non-null TableRecords from a result set
// produced by the mock driver for a MySQL table.
@Test
public void testBuildRecords() throws SQLException {
    MockDriver mockDriver = new MockDriver(returnValueColumnLabels, returnValue, columnMetas, indexMetas);
    DruidDataSource dataSource = new DruidDataSource();
    dataSource.setUrl("jdbc:mock:xxx");
    dataSource.setDriver(mockDriver);
    MockStatementBase mockStatement = new MockStatement(dataSource.getConnection().getConnection());
    DataSourceProxy proxy = DataSourceProxyTest.getDataSourceProxy(dataSource);
    TableMeta tableMeta = TableMetaCacheFactory.getTableMetaCache(JdbcConstants.MYSQL)
        .getTableMeta(proxy.getPlainConnection(), "table_records_test", proxy.getResourceId());
    ResultSet resultSet = mockDriver.executeQuery(mockStatement, "select * from table_records_test");
    TableRecords tableRecords = TableRecords.buildRecords(tableMeta, resultSet);
    Assertions.assertNotNull(tableRecords);
}
/**
 * Builds the immutable variable map exposed for a task: its id and type.
 *
 * @param task the task to expose
 * @return an immutable map with "id" and "type" entries
 */
static Map<String, Object> of(final Task task) {
    return Map.of(
        "id", task.getId(),
        "type", task.getType()
    );
}
// The flow variables must expose id, namespace, revision and tenantId when
// the flow carries a tenant.
@Test
void shouldGetVariablesGivenFlowWithTenant() {
    Map<String, Object> variables = new RunVariables.DefaultBuilder()
        .withFlow(Flow
            .builder()
            .id("id-value")
            .namespace("namespace-value")
            .revision(42)
            .tenantId("tenant-value")
            .build()
        )
        .build(new RunContextLogger());

    Assertions.assertEquals(Map.of(
        "id", "id-value",
        "namespace", "namespace-value",
        "revision", 42,
        "tenantId", "tenant-value"
    ), variables.get("flow"));
}
/**
 * Reads the named property from the JSON node as an int.
 *
 * <p>Fails when the property is missing, null, non-integral (e.g. a string
 * or a floating point number), or out of int range.
 *
 * @throws IllegalArgumentException when the property cannot be parsed
 */
public static int getInt(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing int: %s", property);
    final JsonNode value = node.get(property);
    final boolean parsable =
        value != null && !value.isNull() && value.isIntegralNumber() && value.canConvertToInt();
    Preconditions.checkArgument(parsable, "Cannot parse to an integer value: %s: %s", property, value);
    return value.asInt();
}
// getInt must reject a missing property, a null, a quoted string, and a
// floating point number — and accept a plain integral value.
@Test
public void getInt() throws JsonProcessingException {
    assertThatThrownBy(() -> JsonUtil.getInt("x", JsonUtil.mapper().readTree("{}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse missing int: x");
    assertThatThrownBy(() -> JsonUtil.getInt("x", JsonUtil.mapper().readTree("{\"x\": null}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse to an integer value: x: null");
    assertThatThrownBy(() -> JsonUtil.getInt("x", JsonUtil.mapper().readTree("{\"x\": \"23\"}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse to an integer value: x: \"23\"");
    assertThatThrownBy(() -> JsonUtil.getInt("x", JsonUtil.mapper().readTree("{\"x\": 23.0}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse to an integer value: x: 23.0");
    assertThat(JsonUtil.getInt("x", JsonUtil.mapper().readTree("{\"x\": 23}"))).isEqualTo(23);
}
/**
 * Writes the given authentication status onto the session and persists it.
 * A null status is a no-op: nothing is changed or saved.
 */
public void updateAuthenticationStatus(AdSession adSession, AdAuthenticationStatus adStatus) {
    if (adStatus == null) {
        return;
    }
    adSession.setAuthenticationStatus(adStatus.label);
    adSessionRepository.save(adSession);
}
// A non-null status must cause the session to be persisted exactly once.
@Test
public void updateAuthenticationStatusTest() {
    AdSession adSession = adAuthenticationMapper.authenticationRequestToAdSession("callbackUrl",
        new AuthenticationRequest(), new ArrayList<>());
    adService.updateAuthenticationStatus(adSession, STATUS_INVALID);
    verify(adSessionRepositoryMock, times(1)).save(any(AdSession.class));
}
/**
 * Splits a "view:attribute" string and delegates to the two-part lookup.
 */
public Object getAttribute(File file, String attribute) {
    String viewName = getViewName(attribute);
    String attributeName = getSingleAttribute(attribute);
    return getAttribute(file, viewName, attributeName);
}
// Attributes of the "test" view should be served by the provider inherited
// from the basic view, via both the "view:attr" string and two-arg lookups.
@Test
public void testGetAttribute_fromInheritedProvider() {
    File file = createFile();
    assertThat(service.getAttribute(file, "test:isRegularFile")).isEqualTo(false);
    assertThat(service.getAttribute(file, "test:isDirectory")).isEqualTo(true);
    assertThat(service.getAttribute(file, "test", "fileKey")).isEqualTo(0);
}
/**
 * Post-interception hook for KafkaConsumer calls.
 *
 * <p>Skips invocations that Sermant itself issued, refreshes the cached
 * consumer wrapper, then either delegates to the configured handler or checks
 * whether consumption of prohibited topics must be disabled.
 */
@Override
public ExecuteContext after(ExecuteContext context) {
    // Ignore calls originated by Sermant (detected from the stack trace).
    if (InvokeUtils.isKafkaInvokeBySermant(Thread.currentThread().getStackTrace())) {
        return context;
    }
    KafkaConsumerWrapper kafkaConsumerWrapper = KafkaConsumerController.getKafkaConsumerCache()
            .get(context.getObject().hashCode());
    if (kafkaConsumerWrapper == null) {
        // This consumer was never cached; nothing to update.
        return context;
    }
    updateKafkaConsumerWrapper(kafkaConsumerWrapper);
    if (handler != null) {
        handler.doAfter(context);
    } else {
        LOGGER.info("Try to check if it is need to disable consumption after assignment...");
        // The host application checks whether it is necessary to unsubscribe from the Topic every time it
        // subscribes
        KafkaConsumerController.disableConsumption(kafkaConsumerWrapper,
                ProhibitionConfigManager.getKafkaProhibitionTopics());
    }
    return context;
}
// After interception, the cached wrapper must reflect the consumer's state:
// original partitions and topics recorded, and the assign flag set.
@Test
public void testAfter() {
    ExecuteContext context = ExecuteContext.forMemberMethod(mockConsumer, null, null, null, null);
    interceptor.after(context);
    KafkaConsumerWrapper kafkaConsumerWrapper = KafkaConsumerController.getKafkaConsumerCache()
            .get(mockConsumer.hashCode());
    Assert.assertEquals(topicPartitions, kafkaConsumerWrapper.getOriginalPartitions());
    Assert.assertEquals(Collections.singleton("testTopic-1"), kafkaConsumerWrapper.getOriginalTopics());
    Assert.assertTrue(kafkaConsumerWrapper.isAssign());
}
/**
 * Always reports the key as absent.
 * NOTE(review): this reads as a null-object/stub cache implementation (the
 * test exercises a NULL_QUERY_CACHE) — confirm callers rely on a constant
 * {@code false} here.
 */
@Override
public boolean containsKey(Object key) {
    return false;
}
// The null-object query cache never contains any key.
@Test
public void testContainsKey() throws Exception {
    assertFalse(NULL_QUERY_CACHE.containsKey(1));
}
/**
 * Looks up the infra application for the given id in the duper model and
 * wraps it as an InfraDeployment; empty when no such infra application exists.
 */
@Override
public Optional<Deployment> getDeployment(ApplicationId application) {
    return duperModel.getInfraApplication(application).map(InfraDeployment::new);
}
// Activating the infra deployment must activate only the eligible nodes
// (node-3 and node-6 here), must not touch node-7 which was marked removable,
// and must not report the application as removed.
@Test
public void activate() {
    infrastructureVersions.setTargetVersion(nodeType, target, false);

    addNode(1, Node.State.failed, Optional.of(oldVersion));
    addNode(2, Node.State.parked, Optional.of(target));
    addNode(3, Node.State.active, Optional.of(target));
    addNode(4, Node.State.inactive, Optional.of(target));
    addNode(5, Node.State.dirty, Optional.empty());
    addNode(6, Node.State.ready, Optional.empty());
    Node node7 = addNode(7, Node.State.active, Optional.of(target));
    nodeRepository.nodes().setRemovable(NodeList.of(node7), false);

    infraDeployer.getDeployment(application.getApplicationId()).orElseThrow().activate();

    verify(duperModelInfraApi, never()).infraApplicationRemoved(any());
    verifyActivated("node-3", "node-6");
}
/**
 * Buffers one element for asynchronous delivery.
 *
 * <p>First applies backpressure by flushing while the buffer is at or above
 * {@code maxBufferedRequests}, then appends the converted entry and attempts
 * a non-blocking flush.
 *
 * @throws IOException if a flush fails
 * @throws InterruptedException if interrupted while flushing
 */
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
    // Make room first: flush until the buffer drops below the cap.
    while (bufferedRequestEntries.size() >= maxBufferedRequests) {
        flush();
    }
    addEntryToBuffer(elementConverter.apply(element, context), false);
    nonBlockingFlush();
}
// Entries that fail intermittently must be requeued at the head of the buffer
// so that retries preserve the original submission order.
@Test
public void testThatIntermittentlyFailingEntriesAreEnqueuedOnToTheBufferWithCorrectOrder()
        throws IOException, InterruptedException {
    AsyncSinkWriterImpl sink =
        new AsyncSinkWriterImplBuilder()
            .context(sinkInitContext)
            .maxBatchSizeInBytes(210)
            .maxRecordSizeInBytes(110)
            .simulateFailures(true)
            .build();
    sink.write(String.valueOf(228)); // Buffer: 100/210B; 1/10 elements; 0 inflight
    sink.write(String.valueOf(225)); // Buffer: 200/210B; 2/10 elements; 0 inflight
    sink.write(String.valueOf(1)); // Buffer: 204/210B; 3/10 elements; 0 inflight
    sink.write(String.valueOf(2)); // Buffer: 208/210B; 4/10 elements; 0 inflight
    sink.write(String.valueOf(3)); // Buffer: 212/210B; 5/10 elements; 0 inflight -- flushing
    assertThat(res.size())
        .isEqualTo(2); // Request was [228, 225, 1, 2], element 228, 225 failed
    sink.write(String.valueOf(4)); // Buffer: 8/210B; 2/10 elements; 2 inflight
    sink.write(String.valueOf(5)); // Buffer: 12/210B; 3/10 elements; 2 inflight
    sink.write(String.valueOf(6)); // Buffer: 16/210B; 4/10 elements; 2 inflight
    sink.write(String.valueOf(328)); // Buffer: 116/210B; 5/10 elements; 2 inflight
    sink.write(String.valueOf(325)); // Buffer: 216/210B; 6/10 elements; 2 inflight -- flushing
    // inflight request is processed, buffer: [228, 225, 3, 4, 5, 6, 328, 325]
    assertThat(res).isEqualTo(Arrays.asList(1, 2, 228, 225, 3, 4)); // Buffer: [5, 6, 328, 325]; 0 inflight
}
/**
 * Declares one KiePMMLDroolsRule per attribute of the given characteristic,
 * under the path "&lt;parentPath&gt;_&lt;characteristicName&gt;".
 */
protected void declareRuleFromCharacteristic(final Characteristic characteristic,
                                             final String parentPath,
                                             final List<KiePMMLDroolsRule> rules,
                                             final String statusToSet,
                                             final boolean isLastCharacteristic) {
    logger.trace("declareRuleFromCharacteristic {} {}", characteristic, parentPath);
    final String characteristicPath = String.format(PATH_PATTERN, parentPath, characteristic.getName());
    final List<Attribute> attributes = characteristic.getAttributes();
    // One rule per attribute; the index is part of the rule identity.
    for (int attributeIndex = 0; attributeIndex < attributes.size(); attributeIndex++) {
        declareRuleFromAttribute(attributes.get(attributeIndex),
                                 characteristicPath,
                                 attributeIndex,
                                 rules,
                                 statusToSet,
                                 characteristic.getReasonCode(),
                                 characteristic.getBaselineScore(),
                                 isLastCharacteristic);
    }
}
// With isLastCharacteristic=false, one rule per attribute is declared under
// "<parentPath>_AgeScore" and each rule carries the expected constraint and
// operator-values size.
@Test
void declareRuleFromCharacteristicNotLastCharacteristic() {
    Characteristic characteristic = getCharacteristic();
    final String parentPath = "parent_path";
    final List<KiePMMLDroolsRule> rules = new ArrayList<>();
    final String statusToSet = "status_to_set";
    final boolean isLastCharacteristic = false;
    String[] expectedConstraints = {"value <= 5.0", "value >= 5.0 && value < 12.0"};
    int[] expectedOperatorValuesSizes = {1, 2};
    getKiePMMLScorecardModelCharacteristicASTFactory()
        .declareRuleFromCharacteristic(characteristic, parentPath, rules, statusToSet, isLastCharacteristic);
    assertThat(rules).hasSameSizeAs(characteristic.getAttributes());
    for (int i = 0; i < rules.size(); i++) {
        commonValidateRule(rules.get(i),
                           characteristic.getAttributes().get(i),
                           statusToSet,
                           parentPath + "_AgeScore",
                           i,
                           isLastCharacteristic,
                           1,
                           null,
                           BOOLEAN_OPERATOR.AND,
                           expectedConstraints[i],
                           expectedOperatorValuesSizes[i]
        );
    }
}
/**
 * Composes the user error handler with the distinct error log, using the
 * default fallback logger. Delegates to the three-argument overload.
 */
public static ErrorHandler setupErrorHandler(final ErrorHandler userErrorHandler, final DistinctErrorLog errorLog) {
    return setupErrorHandler(userErrorHandler, errorLog, fallbackLogger());
}
// The composed handler must record to the DistinctErrorLog FIRST and then
// invoke the user handler; an error thrown by the user handler propagates.
@Test
void setupErrorHandlerReturnsAnErrorHandlerThatFirstInvokesLoggingErrorHandlerBeforeCallingSuppliedErrorHandler() {
    final Throwable throwable = new Throwable("Hello, world!");
    final ErrorHandler userErrorHandler = mock(ErrorHandler.class);
    final AssertionError userHandlerError = new AssertionError("user handler error");
    doThrow(userHandlerError).when(userErrorHandler).onError(throwable);
    final DistinctErrorLog distinctErrorLog = mock(DistinctErrorLog.class);
    doReturn(true).when(distinctErrorLog).record(any(Throwable.class));
    final InOrder inOrder = inOrder(userErrorHandler, distinctErrorLog);

    final ErrorHandler errorHandler = CommonContext.setupErrorHandler(userErrorHandler, distinctErrorLog);
    assertNotNull(errorHandler);
    assertNotSame(userErrorHandler, errorHandler);

    final AssertionError error = assertThrowsExactly(AssertionError.class, () -> errorHandler.onError(throwable));
    assertSame(userHandlerError, error);

    inOrder.verify(distinctErrorLog).record(throwable);
    inOrder.verify(userErrorHandler).onError(throwable);
    inOrder.verifyNoMoreInteractions();
}
/**
 * Hashes the token's serialized JWT via {@code getHash} using the given
 * signing algorithm (presumably producing the OIDC at_hash — confirm against
 * getHash's implementation).
 *
 * NOTE(review): getBytes() uses the platform default charset; the serialized
 * JWT is base64url/ASCII so this is effectively safe, but an explicit
 * charset would be clearer — TODO confirm.
 */
public static Base64URL getAccessTokenHash(JWSAlgorithm signingAlg, OAuth2AccessTokenEntity token) {
    byte[] tokenBytes = token.getJwt().serialize().getBytes();
    return getHash(signingAlg, tokenBytes);
}
// The ES384 at_hash of the serialized token must equal the independently
// computed reference value below.
@Test
public void getAccessTokenHash384() {
    /*
     * independently generate hash ascii of token = eyJhbGciOiJub25lIn0.eyJhbGciOiJFUzM4NCIsInN1YiI6ImFub3RoZXJfdXNlciIsImlzcyI6Ind3dy5hbm90aGVyLWV4YW1wbGUubmV0IiwidHlwIjoiSldUIn0. base64url of hash = BWfFK73PQI36M1rg9R6VjMyWOE0-XvBK
     */
    mockToken384.getJwt().serialize();
    Base64URL expectedHash = new Base64URL("BWfFK73PQI36M1rg9R6VjMyWOE0-XvBK");
    Base64URL resultHash = IdTokenHashUtils.getAccessTokenHash(JWSAlgorithm.ES384, mockToken384);
    assertEquals(expectedHash, resultHash);
}
/**
 * Registers (or re-registers) a polled gauge under the suffixed/tagged id,
 * sampling the supplier's value on each poll.
 */
@Override
public void gauge(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
    Id metricId = suffixBaseId(id).withTags(tagNameValuePairs);
    // Drop any previously polled meter for this id so re-registration replaces it.
    PolledMeter.remove(registry, metricId);
    PolledMeter.using(registry)
        .withId(metricId)
        .monitorValue(supplier, ignore -> supplier.get().doubleValue());
}
// After a poll cycle, a registered gauge must report the supplier's value.
// (Method name "testGuage" is a pre-existing typo; renaming is out of scope
// for a documentation pass.)
@Test
public void testGuage() {
    DefaultRegistry registry = new DefaultRegistry();
    SpectatorMetricRegistry metricRegistry = new SpectatorMetricRegistry(registry, registry.createId("foo"));
    metricRegistry.gauge("bar", () -> 10);
    PolledMeter.update(registry);
    Assert.assertEquals(10.0, registry.gauge(registry.createId("foo.bar")).value(), 0);
}
/**
 * Translates a Beam {@code Row} into a Spanner {@code Struct}, column by
 * column, dispatching on each field's Beam type.
 *
 * <p>Null handling varies per type: null INT16/INT32/BYTE values are stored
 * as a null {@code Long}, null DATETIME as a null {@code Timestamp}, null
 * BYTES as a null {@code ByteArray}, and a null DECIMAL is rejected with a
 * NullPointerException (via {@code checkNotNull}).
 *
 * @throws IllegalArgumentException for Beam types not handled by the switch
 */
public static Struct beamRowToStruct(Row row) {
    Struct.Builder structBuilder = Struct.newBuilder();
    List<Schema.Field> fields = row.getSchema().getFields();
    fields.forEach(
        field -> {
            String column = field.getName();
            switch (field.getType().getTypeName()) {
                case ROW:
                    @Nullable Row subRow = row.getRow(column);
                    if (subRow == null) {
                        structBuilder.set(column).to(beamTypeToSpannerType(field.getType()), null);
                    } else {
                        // Recurse for nested rows.
                        structBuilder
                            .set(column)
                            .to(beamTypeToSpannerType(field.getType()), beamRowToStruct(subRow));
                    }
                    break;
                case ARRAY:
                    addIterableToStructBuilder(structBuilder, row.getArray(column), field);
                    break;
                case ITERABLE:
                    addIterableToStructBuilder(structBuilder, row.getIterable(column), field);
                    break;
                case FLOAT:
                    structBuilder.set(column).to(row.getFloat(column));
                    break;
                case DOUBLE:
                    structBuilder.set(column).to(row.getDouble(column));
                    break;
                case INT16:
                    @Nullable Short int16 = row.getInt16(column);
                    if (int16 == null) {
                        // Null widened to a null Long.
                        structBuilder.set(column).to((Long) null);
                    } else {
                        structBuilder.set(column).to(int16);
                    }
                    break;
                case INT32:
                    @Nullable Integer int32 = row.getInt32(column);
                    if (int32 == null) {
                        structBuilder.set(column).to((Long) null);
                    } else {
                        structBuilder.set(column).to(int32);
                    }
                    break;
                case INT64:
                    structBuilder.set(column).to(row.getInt64(column));
                    break;
                case DECIMAL:
                    @Nullable BigDecimal decimal = row.getDecimal(column);
                    // BigDecimal is not nullable
                    if (decimal == null) {
                        checkNotNull(decimal, "Null decimal at column " + column);
                    } else {
                        structBuilder.set(column).to(decimal);
                    }
                    break;
                // TODO: implement logical type date and timestamp
                case DATETIME:
                    @Nullable ReadableDateTime dateTime = row.getDateTime(column);
                    if (dateTime == null) {
                        structBuilder.set(column).to((Timestamp) null);
                    } else {
                        structBuilder.set(column).to(Timestamp.parseTimestamp(dateTime.toString()));
                    }
                    break;
                case STRING:
                    structBuilder.set(column).to(row.getString(column));
                    break;
                case BYTE:
                    @Nullable Byte byteValue = row.getByte(column);
                    if (byteValue == null) {
                        structBuilder.set(column).to((Long) null);
                    } else {
                        structBuilder.set(column).to(byteValue);
                    }
                    break;
                case BYTES:
                    byte @Nullable [] bytes = row.getBytes(column);
                    if (bytes == null) {
                        structBuilder.set(column).to((ByteArray) null);
                    } else {
                        structBuilder.set(column).to(ByteArray.copyFrom(bytes));
                    }
                    break;
                case BOOLEAN:
                    structBuilder.set(column).to(row.getBoolean(column));
                    break;
                default:
                    throw new IllegalArgumentException(
                        String.format(
                            "Unsupported beam type '%s' while translating row to struct.",
                            field.getType().getTypeName()));
            }
        });
    return structBuilder.build();
}
// A null DECIMAL value cannot be translated; conversion must fail with an
// NPE whose message mentions "Null".
@Test
public void testBeamRowToStructNullDecimalNullShouldFail() {
    Schema schema =
        getSchemaTemplate().addNullableField("f_decimal", Schema.FieldType.DECIMAL).build();
    Row row = getRowBuilder(schema).addValue(null).build();
    NullPointerException npe =
        assertThrows(NullPointerException.class, () -> StructUtils.beamRowToStruct(row));
    String message = npe.getMessage();
    checkMessage("Null", message);
}
/**
 * Validates that the imported job carries a description of at least
 * {@code minLength} characters; returns one APPROVAL or ERROR feedback.
 * Disabled rules and non-JobMeta subjects yield no feedback.
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
    List<ImportValidationFeedback> feedback = new ArrayList<>();
    if ( !isEnabled() || !( subject instanceof JobMeta ) ) {
        return feedback;
    }
    JobMeta jobMeta = (JobMeta) subject;
    String description = jobMeta.getDescription();
    boolean longEnough = null != description && minLength <= description.length();
    if ( longEnough ) {
        feedback.add( new ImportValidationFeedback(
            this, ImportValidationResultType.APPROVAL, "A description is present" ) );
    } else {
        feedback.add( new ImportValidationFeedback(
            this, ImportValidationResultType.ERROR, "A description is not present or too short" ) );
    }
    return feedback;
}
// An empty description on an enabled rule must produce an ERROR feedback.
@Test
public void testVerifyRule_EmptyDescription_EnabledRule() {
    JobHasDescriptionImportRule importRule = getImportRule( 10, true );

    JobMeta jobMeta = new JobMeta();
    jobMeta.setDescription( "" );

    List<ImportValidationFeedback> feedbackList = importRule.verifyRule( jobMeta );
    assertNotNull( feedbackList );
    assertFalse( feedbackList.isEmpty() );
    ImportValidationFeedback feedback = feedbackList.get( 0 );
    assertNotNull( feedback );
    assertEquals( ImportValidationResultType.ERROR, feedback.getResultType() );
    assertTrue( feedback.isError() );
}
/**
 * Closes the underlying flow, wrapping any failure in an IOException with
 * the original exception preserved as the cause.
 */
@Override
public void close() throws IOException {
    try {
        flow.close();
    } catch (Exception e) {
        throw new IOException(e);
    }
}
// close() must propagate to the underlying flow exactly once.
@Test
public void testClose() throws Exception {
    this.auth.close();
    verify(this.flow).close();
}
/**
 * Strips the temporary-file suffix from {@code path} if one is present;
 * otherwise returns the path unchanged.
 */
public static String getPermanentFileName(String path) {
    return isTemporaryFileName(path)
        ? path.substring(0, path.length() - TEMPORARY_SUFFIX_LENGTH)
        : path;
}
// getPermanentFileName must strip exactly one temporary suffix, including
// when the remaining name itself already looks like a temporary file name.
@Test
public void getPermanentFileName() {
    assertEquals("/", PathUtils.getPermanentFileName(PathUtils.temporaryFileName(1, "/")));
    assertEquals("/",
        PathUtils.getPermanentFileName(PathUtils.temporaryFileName(0xFFFFFFFFFFFFFFFFL, "/")));
    assertEquals("/foo.alluxio.0x0123456789ABCDEF.tmp", PathUtils
        .getPermanentFileName(PathUtils.temporaryFileName(14324, "/foo.alluxio.0x0123456789ABCDEF.tmp")));
}
/**
 * Removes the page's metadata, updates the byte counters/metrics, and deletes
 * the backing file (temporary or permanent variant depending on the flag).
 *
 * @return the removed page's info
 * @throws PageNotFoundException if the page is not tracked
 */
@Override
@GuardedBy("getLock()")
public PageInfo removePage(PageId pageId, boolean isTemporary) throws PageNotFoundException {
    if (!mPages.contains(INDEX_PAGE_ID, pageId)) {
        throw new PageNotFoundException(String.format("Page %s could not be found", pageId));
    }
    PageInfo pageInfo = mPages.getFirstByField(INDEX_PAGE_ID, pageId);
    mPages.remove(pageInfo);
    // Keep the byte counter and the metric in sync with the index.
    mBytes.addAndGet(-pageInfo.getPageSize());
    Metrics.SPACE_USED.dec(pageInfo.getPageSize());
    if (isTemporary) {
        pageInfo.getLocalCacheDir().deleteTempPage(pageInfo);
    } else {
        pageInfo.getLocalCacheDir().deletePage(pageInfo);
    }
    return pageInfo;
}
// Removing a page that was never added must throw PageNotFoundException and
// leave the cached-page gauge at zero.
@Test
public void removeNotExist() throws Exception {
    assertThrows(PageNotFoundException.class, () -> {
        assertEquals(mPageInfo, mMetaStore.removePage(mPage));
    });
    assertEquals(0, mCachedPageGauge.getValue());
}
/**
 * A delivery succeeded when an HTTP status was recorded and it is 2xx.
 */
public boolean isSuccess() {
    if (httpStatus == null) {
        return false;
    }
    int status = httpStatus;
    return status >= 200 && status < 300;
}
// A 404 status is outside the 2xx range, so the delivery is not a success.
@Test
public void isSuccess_returns_false_if_http_response_returns_error_status() {
    WebhookDelivery delivery = newBuilderTemplate()
        .setHttpStatus(404)
        .build();

    assertThat(delivery.isSuccess()).isFalse();
}
static public boolean createMissingParentDirectories(File file) { File parent = file.getParentFile(); if (parent == null) { // Parent directory not specified, therefore it's a request to // create nothing. Done! ;) return true; } // File.mkdirs() creates the parent directories only if they don't // already exist; and it's okay if they do. parent.mkdirs(); return parent.exists(); }
// Creating parent dirs must report success when the directory chain already
// exists. NOTE(review): file.mkdirs() creates the target path itself as a
// directory — presumably intentional here to guarantee the parent exists;
// confirm.
@Test
public void createParentDirIgnoresExistingDir() {
    String target = CoreTestConstants.OUTPUT_DIR_PREFIX + "/fu" + diff + "/testing.txt";
    File file = new File(target);
    cleanupList.add(file);
    file.mkdirs();
    assertTrue(file.getParentFile().exists());
    assertTrue(FileUtil.createMissingParentDirectories(file));
}
/**
 * Delegates directly to the repository: all cities of the given country.
 */
@Override
public Set<City> findAllByCountry(Country country) {
    return cityRepository.findAllByCountry(country);
}
// The service must delegate to the repository and return its result as-is.
@Test
void findAllByCountry() {
    City city = createCity();
    Country country = city.getCanton().getCountry();
    Mockito.when(cityRepository.findAllByCountry(country))
        .thenReturn(Collections.singleton(city));
    Set<City> expected = Set.of(city);
    Set<City> actual = cityService.findAllByCountry(country);
    ReflectionAssert.assertReflectionEquals(expected, actual);
}
/**
 * Deserializes the given config content into an editable model, optionally
 * hands it to the callback, then preprocesses and validates it into the
 * runtime config.
 *
 * @throws Exception on deserialization or validation failure
 */
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
    LOGGER.debug("[Config Save] Loading config holder");
    CruiseConfig configForEdit = deserializeConfig(content);
    if (callback != null) {
        callback.call(configForEdit);
    }
    CruiseConfig config = preprocessAndValidate(configForEdit);
    return new GoConfigHolder(config, configForEdit);
}
// The config XML with an Ant builder must yield exactly one AntTask with
// target "all" and a single test-type artifact on the "cardlist" job.
@Test
void shouldLoadAntBuilder() throws Exception {
    CruiseConfig cruiseConfig = xmlLoader.loadConfigHolder(CONFIG_WITH_ANT_BUILDER).config;
    JobConfig plan = cruiseConfig.jobConfigByName("pipeline1", "mingle", "cardlist", true);

    assertThat(plan.tasks()).hasSize(1);
    AntTask builder = (AntTask) plan.tasks().first();
    assertThat(builder.getTarget()).isEqualTo("all");
    final ArtifactTypeConfigs cardListArtifacts =
        cruiseConfig.jobConfigByName("pipeline1", "mingle", "cardlist", true).artifactTypeConfigs();
    assertThat(cardListArtifacts.size()).isEqualTo(1);
    ArtifactTypeConfig artifactConfigPlan = cardListArtifacts.get(0);
    assertThat(artifactConfigPlan.getArtifactType()).isEqualTo(ArtifactType.test);
}
/**
 * Recursively converts a Fair Scheduler queue and all of its descendants,
 * emitting each supported property for the queue before descending.
 */
public void convertQueueHierarchy(FSQueue queue) {
    List<FSQueue> children = queue.getChildQueues();
    final String queueName = queue.getName();

    emitChildQueues(queueName, children);
    emitMaxAMShare(queueName, queue);
    emitMaxParallelApps(queueName, queue);
    emitMaxAllocations(queueName, queue);
    emitPreemptionDisabled(queueName, queue);
    emitChildCapacity(queue);
    emitMaximumCapacity(queueName, queue);
    emitSizeBasedWeight(queueName);
    emitOrderingPolicy(queueName, queue);
    checkMaxChildCapacitySetting(queue);
    emitDefaultUserLimitFactor(queueName, children);

    // Depth-first traversal over the whole queue tree.
    for (FSQueue childQueue : children) {
        convertQueueHierarchy(childQueue);
    }
}
// Enabling the reservation system must surface the unsupported "maxCapacity"
// property as an UnsupportedPropertyException during conversion.
@Test
public void testReservationSystemNotSupported() {
    converter = builder.build();

    expectedException.expect(UnsupportedPropertyException.class);
    expectedException.expectMessage("maxCapacity");

    Mockito.doThrow(new UnsupportedPropertyException("maxCapacity"))
        .when(ruleHandler).handleMaxChildCapacity();
    yarnConfig.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);

    converter.convertQueueHierarchy(rootQueue);
}
/**
 * Stores a non-null string property under the given non-null key.
 */
public void putString(String key, String str) {
    // checkNotNull returns its argument, so the null checks run (key first,
    // then value) before the property is stored — same order as before.
    put(checkNotNull(key), checkNotNull(str));
}
// A non-numeric entry inside an int-array property must fail validation.
@Test
void testArrayInvalidValues() {
    DescriptorProperties properties = new DescriptorProperties();
    properties.putString(ARRAY_KEY + ".0", "12");
    properties.putString(ARRAY_KEY + ".1", "66");
    properties.putString(ARRAY_KEY + ".2", "INVALID");

    assertThatThrownBy(() -> testArrayValidation(properties, 1, Integer.MAX_VALUE))
        .isInstanceOf(ValidationException.class);
}
/**
 * Returns the p-quantile of this log-normal distribution.
 *
 * @param p probability in [0, 1]
 * @throws IllegalArgumentException if p is outside [0, 1]
 */
@Override
public double quantile(double p) {
    if (p < 0.0 || p > 1.0) {
        throw new IllegalArgumentException("Invalid p: " + p);
    }
    // exp(mu - sqrt(2) * sigma * inverfc(2p)); the literal is sqrt(2).
    double exponent = -1.41421356237309505 * sigma * Erf.inverfc(2.0 * p) + mu;
    return Math.exp(exponent);
}
// Quantiles of LogNormal(1, 1) checked against reference values at several
// probabilities, with tolerances scaled to the printed precision.
@Test
public void testQuantile() {
    System.out.println("quantile");
    LogNormalDistribution instance = new LogNormalDistribution(1.0, 1.0);
    instance.rand();
    assertEquals(0.2654449, instance.quantile(0.01), 1E-7);
    assertEquals(0.754612, instance.quantile(0.1), 1E-6);
    assertEquals(1.171610, instance.quantile(0.2), 1E-6);
    assertEquals(1.608978, instance.quantile(0.3), 1E-6);
    assertEquals(9.791861, instance.quantile(0.9), 1E-6);
    assertEquals(27.83649, instance.quantile(0.99), 1E-5);
}
/**
 * Delegates to the wrapped transformer, failing with IllegalStateException
 * if no processing context has been supplied yet.
 */
@Override
public R transform(final K readOnlyKey, final GenericRow value) {
    return delegate.transform(
        readOnlyKey,
        value,
        context.orElseThrow(() -> new IllegalStateException("Not initialized"))
    );
}
// transform() must forward key and value to the wrapped KSQL transformer
// along with some context.
@Test
public void shouldInvokeInnerTransformer() {
    // When:
    ksTransformer.transform(KEY, VALUE);

    // Then:
    verify(ksqlTransformer).transform(
        eq(KEY),
        eq(VALUE),
        any()
    );
}
/**
 * Renders the configured template against the event's fields, exposed to the
 * template as "source". Returns an error field value when the template is
 * invalid or rendering throws.
 */
@Override
protected FieldValue doGet(String fieldName, EventWithContext eventWithContext) {
    final ImmutableMap.Builder<String, Object> dataModelBuilder = ImmutableMap.builder();
    // Message context takes precedence over event context when both exist.
    if (eventWithContext.messageContext().isPresent()) {
        dataModelBuilder.put("source", eventWithContext.messageContext().get().getFields());
    } else if (eventWithContext.eventContext().isPresent()) {
        dataModelBuilder.put("source", eventWithContext.eventContext().get().toDto().fields());
    }
    final ImmutableMap<String, Object> dataModel = dataModelBuilder.build();

    if (!isValidTemplate(config.template(), dataModel)) {
        return FieldValue.error();
    }

    try {
        return FieldValue.string(templateEngine.transform(config.template(), dataModel));
    } catch (Exception e) {
        LOG.error("Couldn't render field template \"{}\"", config.template(), e);
        return FieldValue.error();
    }
}
// When only an event context is present, its fields must be exposed to the
// template under "source".
@Test
public void templateWithEventContext() {
    final TestEvent event = new TestEvent();
    final TestEvent eventContext = new TestEvent();

    eventContext.setField("hello", FieldValue.string("event"));

    final EventWithContext eventWithContext = EventWithContext.create(event, eventContext);
    final FieldValue fieldValue = newTemplate("hello: ${source.hello}").doGet("test", eventWithContext);

    assertThat(fieldValue.value()).isEqualTo("hello: event");
}
/**
 * Sets the monitor version and returns this builder for chaining.
 */
public MonitorBuilder version(String version) {
    this.version = version;
    return getThis();
}
// The builder must carry the version through to the built monitor.
@Test
void version() {
    MonitorBuilder builder = MonitorBuilder.newBuilder();
    builder.version("version");
    Assertions.assertEquals("version", builder.build().getVersion());
}
/**
 * Snapshots the injected-method reference beans: each cached bean id is
 * resolved through the reference bean manager, and the result is returned
 * as an unmodifiable map.
 */
public Map<InjectionMetadata.InjectedElement, ReferenceBean<?>> getInjectedMethodReferenceBeanMap() {
    Map<InjectionMetadata.InjectedElement, ReferenceBean<?>> resolved = new HashMap<>();
    injectedMethodReferenceBeanCache.forEach(
            (element, beanId) -> resolved.put(element, referenceBeanManager.getById(beanId)));
    return Collections.unmodifiableMap(resolved);
}
// All four annotated setter methods must appear exactly once in the
// injected-method reference bean map.
@Test
void testGetInjectedMethodReferenceBeanMap() {
    ReferenceAnnotationBeanPostProcessor beanPostProcessor = getReferenceAnnotationBeanPostProcessor();
    Map<InjectionMetadata.InjectedElement, ReferenceBean<?>> referenceBeanMap =
        beanPostProcessor.getInjectedMethodReferenceBeanMap();

    Assertions.assertEquals(4, referenceBeanMap.size());

    // Count occurrences per expected setter name; each must end at exactly 1.
    Map<String, Integer> checkingMethodNames = new HashMap<>();
    checkingMethodNames.put("setDemoServiceFromAncestor", 0);
    checkingMethodNames.put("setDemoService", 0);
    checkingMethodNames.put("setHelloService2", 0);
    checkingMethodNames.put("setHelloService3", 0);

    for (Map.Entry<InjectionMetadata.InjectedElement, ReferenceBean<?>> entry : referenceBeanMap.entrySet()) {
        InjectionMetadata.InjectedElement injectedElement = entry.getKey();
        java.lang.reflect.Method method = (java.lang.reflect.Method) injectedElement.getMember();
        Integer count = checkingMethodNames.get(method.getName());
        Assertions.assertNotNull(count);
        Assertions.assertEquals(0, count.intValue());
        checkingMethodNames.put(method.getName(), count + 1);
    }

    for (Map.Entry<String, Integer> entry : checkingMethodNames.entrySet()) {
        Assertions.assertEquals(1, entry.getValue().intValue(),
            "check method element failed: " + entry.getKey());
    }
}
/**
 * Builds the undo SQL by delegating to the parent implementation.
 * NOTE(review): this override adds no behavior — confirm it is kept
 * intentionally (e.g. to widen visibility or as an extension point).
 */
@Override
protected String buildUndoSQL() {
    return super.buildUndoSQL();
}
// The generated undo SQL must contain INSERT, TABLE_NAME and ID.
@Test
public void buildUndoSQL() {
    String sql = executor.buildUndoSQL().toUpperCase();
    Assertions.assertNotNull(sql);
    Assertions.assertTrue(sql.contains("INSERT"));
    Assertions.assertTrue(sql.contains("TABLE_NAME"));
    Assertions.assertTrue(sql.contains("ID"));
}
/**
 * Maps a throwable to a JAX-RS response: a WebApplicationException keeps its
 * own response; anything else becomes a 500 carrying a JSON error body.
 */
private static Response getResponse(Throwable t) {
    if (t instanceof WebApplicationException e) {
        return e.getResponse();
    }
    return Response
            .status(Status.INTERNAL_SERVER_ERROR.getStatusCode(), t.getMessage())
            .entity(new ErrorData(getExceptionData(t)))
            .type(MediaType.APPLICATION_JSON)
            .build();
}
// Wrapping a WebApplicationException must preserve its status and entity.
@Test
public void testWebApplicationException() {
    WebApplicationException wae =
        new WebApplicationException("test web application exception", Status.TEMPORARY_REDIRECT);
    RestException testException = new RestException(wae);
    assertEquals(Status.TEMPORARY_REDIRECT.getStatusCode(), testException.getResponse().getStatus());
    assertEquals(wae.getResponse().getEntity(), testException.getResponse().getEntity());
}
/**
 * Parses a rootless URI path into the host: optional user info first, then —
 * only for protocols with a configurable, currently blank hostname — a
 * hostname taken from the first path segment, and finally the path.
 */
static void parseRootless(final StringReader reader, final Host host,
                          final Consumer<HostParserException> decorator) throws HostParserException {
    // This is not RFC-compliant.
    // * Rootless-path must not include authentication information.
    // NOTE(review): the result is unused — confirm parseUserInfo is invoked
    // purely for its side effects on reader/host.
    final boolean userInfoResult = parseUserInfo(reader, host, decorator);
    if (host.getProtocol().isHostnameConfigurable() && StringUtils.isWhitespace(host.getHostname())) {
        // This is not RFC-compliant.
        // We assume for hostconfigurable-empty-hostnames a hostname on first path segment
        parseHostname(reader, host, decorator);
    }
    parsePath(reader, host, false, decorator);
}
// For protocols with non-configurable hostnames, a rootless path must yield
// the user info and keep the remainder as the default path.
@Test
public void testParseRootlessWithUser() throws HostParserException {
    final Host host = new Host(new TestProtocol() {
        @Override
        public boolean isHostnameConfigurable() {
            return false;
        }
    });
    final String path = "user@path/sub/directory";
    final HostParser.StringReader reader = new HostParser.StringReader(path);
    HostParser.parseRootless(reader, host, null);
    assertEquals("user", host.getCredentials().getUsername());
    assertEquals("path/sub/directory", host.getDefaultPath());
}
@Override protected void delete(Collection<HadoopResourceId> resourceIds) throws IOException { for (HadoopResourceId resourceId : resourceIds) { // ignore response as issues are surfaced with exception final Path resourcePath = resourceId.toPath(); resourcePath.getFileSystem(configuration).delete(resourceId.toPath(), false); } }
// Deleting two of three files must leave only testFileB matched, with its
// original size and modification time.
@Test
public void testDelete() throws Exception {
    create("testFileA", "testDataA".getBytes(StandardCharsets.UTF_8));
    create("testFileB", "testDataB".getBytes(StandardCharsets.UTF_8));
    create("testFileC", "testDataC".getBytes(StandardCharsets.UTF_8));

    // ensure files exist
    assertArrayEquals("testDataA".getBytes(StandardCharsets.UTF_8), read("testFileA", 0));
    assertArrayEquals("testDataB".getBytes(StandardCharsets.UTF_8), read("testFileB", 0));
    assertArrayEquals("testDataC".getBytes(StandardCharsets.UTF_8), read("testFileC", 0));

    fileSystem.delete(ImmutableList.of(testPath("testFileA"), testPath("testFileC")));

    List<MatchResult> results = fileSystem.match(ImmutableList.of(testPath("testFile*").toString()));
    assertThat(
        results,
        contains(
            MatchResult.create(
                Status.OK,
                ImmutableList.of(
                    Metadata.builder()
                        .setResourceId(testPath("testFileB"))
                        .setIsReadSeekEfficient(true)
                        .setSizeBytes("testDataB".getBytes(StandardCharsets.UTF_8).length)
                        .setLastModifiedMillis(lastModified("testFileB"))
                        .build()))));
}
protected String getResourceName(String resourceName, /*@NonNull*/ Method method) { // If resource name is present in annotation, use this value. if (StringUtil.isNotBlank(resourceName)) { return resourceName; } // Parse name of target method. return MethodUtil.resolveMethodName(method); }
// A non-blank name is used verbatim; null or empty falls back to the
// resolved method signature "<class>:random()".
@Test
public void testGetResourceName() throws Exception {
    Method method = FooService.class.getMethod("random");
    String resourceName = "someRandom";
    String expectedResolvedName = FooService.class.getName() + ":random()";

    assertThat(getResourceName(resourceName, method)).isEqualTo(resourceName);
    assertThat(getResourceName(null, method)).isEqualTo(expectedResolvedName);
    assertThat(getResourceName("", method)).isEqualTo(expectedResolvedName);
}