focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public Service createGenericEventService(String name, String version, String event, String referencePayload) throws EntityAlreadyExistsException { log.info("Creating a new Service '{}-{}' for generic event {}", name, version, event); // Check if corresponding Service already exists. Service existingService = serviceRepository.findByNameAndVersion(name, version); if (existingService != null) { log.warn("A Service '{}-{}' is already existing. Throwing an Exception", name, version); throw new EntityAlreadyExistsException( String.format("Service '%s-%s' is already present in store", name, version)); } // Create new service with GENERIC_EVENT type. Service service = new Service(); service.setName(name); service.setVersion(version); service.setType(ServiceType.GENERIC_EVENT); service.setMetadata(new Metadata()); // Now create basic crud operations for the resource. Operation subscribeOp = new Operation(); subscribeOp.setName("SUBSCRIBE " + event); subscribeOp.setMethod("SUBSCRIBE"); subscribeOp.setDefaultDelay(defaultAsyncFrequency); // Create bindings for Kafka and Websockets. Binding kafkaBinding = new Binding(BindingType.KAFKA); kafkaBinding.setKeyType("string"); Binding wsBinding = new Binding(BindingType.WS); wsBinding.setMethod("POST"); subscribeOp.addBinding(BindingType.KAFKA.name(), kafkaBinding); subscribeOp.addBinding(BindingType.WS.name(), wsBinding); service.addOperation(subscribeOp); serviceRepository.save(service); log.info("Having created Service '{}' for generic event {}", service.getId(), event); // If reference payload is provided, record a first resource. 
if (referencePayload != null) { Resource artifact = new Resource(); artifact.setName(event + "-asyncapi.yaml"); artifact.setType(ResourceType.ASYNC_API_SPEC); artifact.setServiceId(service.getId()); artifact.setSourceArtifact(event + "-asyncapi.yaml"); artifact.setContent(buildAsyncAPISpecContent(service, event, referencePayload)); resourceRepository.save(artifact); EventMessage eventMessage = new EventMessage(); eventMessage.setName("Reference"); eventMessage.setContent(referencePayload); eventMessage.setOperationId(IdBuilder.buildOperationId(service, subscribeOp)); eventMessage.setMediaType("application/json"); eventMessageRepository.save(eventMessage); log.info("Having created resource '{}' for Service '{}'", artifact.getId(), service.getId()); } // Publish a Service create event before returning. publishServiceChangeEvent(service, ChangeType.CREATED); return service; }
@Test void testCreateGenericEventServiceWithReference() { Service created = null; try { created = service.createGenericEventService("Order Service", "2.0", "order", "{\"customerId\": \"123456789\",\n \"amount\": 12.5}"); } catch (Exception e) { fail("No exception should be thrown"); } // Check created object. assertNotNull(created.getId()); // Retrieve object by id and assert on what has been persisted. Service retrieved = repository.findById(created.getId()).orElse(null); assertEquals("Order Service", retrieved.getName()); assertEquals("2.0", retrieved.getVersion()); assertEquals(ServiceType.GENERIC_EVENT, retrieved.getType()); assertEquals(1, retrieved.getOperations().size()); List<Resource> resources = resourceRepository.findByServiceId(retrieved.getId()); assertEquals(1, resources.size()); Resource resource = resources.get(0); assertEquals("order-asyncapi.yaml", resource.getName()); assertEquals(ResourceType.ASYNC_API_SPEC, resource.getType()); assertNotNull(resource.getContent()); assertTrue(resource.getContent().contains("payload: {\"customerId\": \"123456789\", \"amount\": 12.5}")); }
public void inputWatermark(Watermark watermark, int channelIndex, DataOutput<?> output) throws Exception { final SubpartitionStatus subpartitionStatus; if (watermark instanceof InternalWatermark) { int subpartitionStatusIndex = ((InternalWatermark) watermark).getSubpartitionIndex(); subpartitionStatus = subpartitionStatuses.get(channelIndex).get(subpartitionStatusIndex); } else { subpartitionStatus = subpartitionStatuses.get(channelIndex).get(subpartitionIndexes[channelIndex]); } // ignore the input watermark if its subpartition, or all subpartitions are idle (i.e. // overall the valve is idle). if (lastOutputWatermarkStatus.isActive() && subpartitionStatus.watermarkStatus.isActive()) { long watermarkMillis = watermark.getTimestamp(); // if the input watermark's value is less than the last received watermark for its // subpartition, ignore it also. if (watermarkMillis > subpartitionStatus.watermark) { subpartitionStatus.watermark = watermarkMillis; if (subpartitionStatus.isWatermarkAligned) { adjustAlignedSubpartitionStatuses(subpartitionStatus); } else if (watermarkMillis >= lastOutputWatermark) { // previously unaligned subpartitions are now aligned if its watermark has // caught up markWatermarkAligned(subpartitionStatus); } // now, attempt to find a new min watermark across all aligned subpartitions findAndOutputNewMinWatermarkAcrossAlignedSubpartitions(output); } } }
@Test void testMultipleInputIncreasingWatermarks() throws Exception { StatusWatermarkOutput valveOutput = new StatusWatermarkOutput(); StatusWatermarkValve valve = new StatusWatermarkValve(3); valve.inputWatermark(new Watermark(0), 0, valveOutput); valve.inputWatermark(new Watermark(0), 1, valveOutput); valve.inputWatermark(new Watermark(0), 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(0)); valve.inputWatermark(new Watermark(12), 0, valveOutput); valve.inputWatermark(new Watermark(8), 2, valveOutput); valve.inputWatermark(new Watermark(10), 2, valveOutput); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermark(new Watermark(15), 1, valveOutput); // lowest watermark across all channels is now channel 2, with watermark @ 10 assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(10)); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermark(new Watermark(17), 2, valveOutput); // lowest watermark across all channels is now channel 0, with watermark @ 12 assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(12)); assertThat(valveOutput.popLastSeenOutput()).isNull(); valve.inputWatermark(new Watermark(20), 0, valveOutput); // lowest watermark across all channels is now channel 1, with watermark @ 15 assertThat(valveOutput.popLastSeenOutput()).isEqualTo(new Watermark(15)); assertThat(valveOutput.popLastSeenOutput()).isNull(); }
/**
 * Fetches the full INFO output from the given cluster node and exposes it as
 * {@link Properties}.
 *
 * @param node the cluster node to query
 * @return the node's INFO key/value pairs
 */
@Override
public Properties info(RedisClusterNode node) {
    Map<String, String> info = execute(node, RedisCommands.INFO_ALL);
    Properties result = new Properties();
    // setProperty keeps entries String-typed (Properties extends Hashtable<Object,Object>).
    info.forEach(result::setProperty);
    return result;
}
@Test
public void testInfo() {
    testInCluster(connection -> {
        RedisClusterNode master = getFirstMaster(connection);
        // A live Redis node reports well over ten INFO properties.
        Properties info = connection.info(master);
        assertThat(info.size()).isGreaterThan(10);
    });
}
/**
 * Sets the named bean property from its string representation.
 * Null values are ignored; unknown properties and conversion failures are
 * reported as warnings instead of being thrown.
 */
public void setProperty(String name, String value) {
    if (value == null) {
        return;
    }
    // Normalize e.g. "FooBar" -> "fooBar" before the descriptor lookup.
    name = Introspector.decapitalize(name);
    PropertyDescriptor prop = getPropertyDescriptor(name);
    if (prop == null) {
        addWarn("No such property [" + name + "] in " + objClass.getName() + ".");
        return;
    }
    try {
        setProperty(prop, name, value);
    } catch (PropertySetterException ex) {
        addWarn("Failed to set property [" + name + "] to value \"" + value + "\". ", ex);
    }
}
@Test
public void testEnum() {
    // The string "BLUE" must be converted to the HouseColor enum constant.
    setter.setProperty("houseColor", "BLUE");
    assertEquals(HouseColor.BLUE, house.getHouseColor());
}
/**
 * Runs every known {@link Rule} pattern against the crash log and collects a
 * {@link Result} for each rule that matches.
 *
 * @param log the raw crash-report/log text
 * @return the set of matched results (empty when nothing matches)
 */
public static Set<Result> analyze(String log) {
    Set<Result> results = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            results.add(new Result(rule, log, matcher));
        }
    }
    return results;
}

/**
 * Misspelled historical name, kept so existing callers keep compiling.
 *
 * @deprecated use {@link #analyze(String)} instead
 */
@Deprecated
public static Set<Result> anaylze(String log) {
    return analyze(log);
}
@Test
public void incompleteForgeInstallation2() throws IOException {
    CrashReportAnalyzer.Result result = findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/logs/incomplete_forge_installation2.txt")),
            CrashReportAnalyzer.Rule.INCOMPLETE_FORGE_INSTALLATION);
    // The lookup result was previously ignored; assert it so a missing match fails loudly.
    assertNotNull(result);
}
/**
 * Identity codec: decompression is a no-op, the payload is returned unchanged.
 * Note the same array instance is returned, not a copy.
 */
@Override
public byte[] decompress(byte[] payloadByteArr) {
    return payloadByteArr;
}
@Test
void decompress() {
    byte[] input = new byte[] {1, 2, 3, 4, 5};
    final byte[] decompressed = Identity.IDENTITY.decompress(input);
    // assertEquals on arrays only compares references; compare the contents instead
    // so the test still verifies correctness if the codec ever returns a copy.
    Assertions.assertArrayEquals(input, decompressed);
}
/**
 * Tests whether {@code path} fully matches the Ant-style {@code pattern}.
 */
public boolean match(String pattern, String path) {
    // fullMatch = true: the entire path must be consumed by the pattern.
    return doMatch(pattern, path, true, null);
}
@Test
public void matchesTest2() {
    AntPathMatcher antPathMatcher = new AntPathMatcher();

    // '**' spans directories, '*' spans a single segment.
    String pattern = "/**/*.xml*";
    String path = "/WEB-INF/web.xml";
    boolean isMatched = antPathMatcher.match(pattern, path);
    assertTrue(isMatched);

    pattern = "org/codelabor/*/**/*Service";
    path = "org/codelabor/example/HelloWorldService";
    isMatched = antPathMatcher.match(pattern, path);
    assertTrue(isMatched);

    // '?' matches exactly one character.
    pattern = "org/codelabor/*/**/*Service?";
    path = "org/codelabor/example/HelloWorldServices";
    isMatched = antPathMatcher.match(pattern, path);
    assertTrue(isMatched);
}
/**
 * Applies a single-input Beam PTransform to a Flink DataSet and returns the
 * resulting output DataSet.
 */
public <InputT, OutputT, CollectionT extends PCollection<? extends InputT>> DataSet<OutputT> applyBeamPTransform(
        DataSet<InputT> input, PTransform<CollectionT, PCollection<OutputT>> transform) {
    // Wrap the single input under the key "input" and unwrap the single "output".
    return (DataSet)
        getNonNull(
            applyBeamPTransformInternal(
                ImmutableMap.of("input", input),
                (pipeline, map) -> (CollectionT) getNonNull(map, "input"),
                (output) -> ImmutableMap.of("output", output),
                transform,
                input.getExecutionEnvironment()),
            "output");
}
@Test
public void testApplyGroupingTransform() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
    DataSet<String> input = env.fromCollection(ImmutableList.of("a", "a", "b"));

    // Count.perElement should yield per-key counts across the whole data set.
    DataSet<KV<String, Long>> result =
        new BeamFlinkDataSetAdapter().applyBeamPTransform(input, Count.perElement());

    assertThat(result.collect(), containsInAnyOrder(KV.of("a", 2L), KV.of("b", 1L)));
}
public boolean sameOrHigher(Version other) { if (isNullOrEmpty(version.preReleaseVersion().orElse(""))) { return version.isHigherThanOrEquivalentTo(other.getVersion()); } else { // If this is a pre-release version, use the major.minor.patch version for comparison with the other. // This allows plugins to require a server version of 2.1.0 and it still gets loaded on a 2.1.0-beta.2 server. // See: https://github.com/Graylog2/graylog2-server/issues/2462 String version1 = version.toStableVersion().toString(); com.github.zafarkhaja.semver.Version version2 = com.github.zafarkhaja.semver.Version.parse(version1); return version2.isHigherThanOrEquivalentTo(other.getVersion()); } }
@Test public void testSameOrHigher() throws Exception { Version v = Version.from(0, 20, 2); assertTrue(v.sameOrHigher(Version.from(0, 19, 0))); assertTrue(v.sameOrHigher(Version.from(0, 18, 2))); assertTrue(v.sameOrHigher(Version.from(0, 19, 9001))); assertTrue(v.sameOrHigher(Version.from(0, 20, 0))); assertFalse(v.sameOrHigher(Version.from(1, 0, 0))); assertFalse(v.sameOrHigher(Version.from(1, 0, 9001))); assertFalse(v.sameOrHigher(Version.from(1, 20, 0))); assertFalse(v.sameOrHigher(Version.from(1, 1, 0))); assertFalse(v.sameOrHigher(Version.from(3, 2, 1))); assertTrue(v.sameOrHigher(Version.from(0, 19, 0, "rc.1"))); assertFalse(v.sameOrHigher(Version.from(1, 19, 0, "rc.1"))); assertFalse(v.sameOrHigher(Version.from(0, 21, 0, "rc.1"))); assertTrue(v.sameOrHigher(Version.from(0, 20, 1, "rc.1"))); assertTrue(v.sameOrHigher(Version.from(0, 20, 0, "rc.1"))); assertTrue(v.sameOrHigher(Version.from(0, 20, 2, "rc.1"))); assertFalse(v.sameOrHigher(Version.from(0, 20, 3, "rc.1"))); v = Version.from(1, 5, 0); assertTrue(v.sameOrHigher(Version.from(0, 19, 0))); assertTrue(v.sameOrHigher(Version.from(1, 0, 0))); assertTrue(v.sameOrHigher(Version.from(0, 19, 9001))); assertTrue(v.sameOrHigher(Version.from(1, 5, 0))); assertTrue(v.sameOrHigher(Version.from(1, 4, 9))); assertFalse(v.sameOrHigher(Version.from(1, 6, 0))); assertFalse(v.sameOrHigher(Version.from(3, 0, 0))); assertFalse(v.sameOrHigher(Version.from(1, 5, 9001))); assertFalse(v.sameOrHigher(Version.from(1, 20, 0))); assertFalse(v.sameOrHigher(Version.from(1, 20, 5))); assertFalse(v.sameOrHigher(Version.from(3, 2, 1))); assertTrue(v.sameOrHigher(Version.from(0, 19, 0, "rc.1"))); assertFalse(v.sameOrHigher(Version.from(2, 19, 0, "rc.1"))); assertTrue(v.sameOrHigher(Version.from(0, 0, 0))); assertFalse(v.sameOrHigher(Version.from(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE))); // See https://github.com/Graylog2/graylog2-server/issues/2462 v = Version.from(2, 1, 0, "beta.2"); 
assertTrue(v.sameOrHigher(Version.from(2, 1, 0, "alpha.1"))); assertTrue(v.sameOrHigher(Version.from(2, 1, 0, "beta.1"))); assertTrue(v.sameOrHigher(Version.from(2, 1, 0, "beta.2"))); assertTrue(v.sameOrHigher(Version.from(2, 1, 0))); // This needs to work! assertFalse(v.sameOrHigher(Version.from(2, 2, 0, "alpha.1"))); assertFalse(v.sameOrHigher(Version.from(2, 2, 0))); }
/**
 * Looks up a page of assets owned by the given tenant/customer pair,
 * applying the page link's text search and paging settings.
 */
@Override
public PageData<Asset> findAssetsByTenantIdAndCustomerId(UUID tenantId, UUID customerId, PageLink pageLink) {
    return DaoUtil.toPageData(
            assetRepository.findByTenantIdAndCustomerId(
                    tenantId,
                    customerId,
                    pageLink.getTextSearch(),
                    DaoUtil.toPageable(pageLink)));
}
@Test
public void testFindAssetsByTenantIdAndCustomerIdByLabel() {
    // Page through 30 matching assets in pages of 20: 20, then 10, then 0.
    PageLink pageLink = new PageLink(20, 0, "label_");
    PageData<Asset> assets1 = assetDao.findAssetsByTenantIdAndCustomerId(tenantId1, customerId1, pageLink);
    assertEquals(20, assets1.getData().size());

    pageLink = pageLink.nextPageLink();
    PageData<Asset> assets2 = assetDao.findAssetsByTenantIdAndCustomerId(tenantId1, customerId1, pageLink);
    assertEquals(10, assets2.getData().size());

    pageLink = pageLink.nextPageLink();
    PageData<Asset> assets3 = assetDao.findAssetsByTenantIdAndCustomerId(tenantId1, customerId1, pageLink);
    assertEquals(0, assets3.getData().size());
}
/** Returns the analyzed instructions in code order. */
public List<AnalyzedInstruction> getAnalyzedInstructions() {
    return analyzedInstructions.getValues();
}
@Test
public void testInstanceOfNarrowingNez_dalvik() throws IOException {
    // instance-of v0, v1, Lmain; then if-nez v0 -> :instance_of
    MethodImplementationBuilder builder = new MethodImplementationBuilder(2);
    builder.addInstruction(new BuilderInstruction22c(Opcode.INSTANCE_OF, 0, 1,
            new ImmutableTypeReference("Lmain;")));
    builder.addInstruction(new BuilderInstruction21t(Opcode.IF_NEZ, 0, builder.getLabel("instance_of")));
    builder.addInstruction(new BuilderInstruction10x(Opcode.RETURN_VOID));
    builder.addLabel("instance_of");
    builder.addInstruction(new BuilderInstruction10x(Opcode.RETURN_VOID));
    MethodImplementation methodImplementation = builder.getMethodImplementation();

    Method method = new ImmutableMethod("Lmain;", "narrowing",
            Collections.singletonList(new ImmutableMethodParameter("Ljava/lang/Object;", null, null)),
            "V", AccessFlags.PUBLIC.getValue(), null, null, methodImplementation);
    ClassDef classDef = new ImmutableClassDef("Lmain;", AccessFlags.PUBLIC.getValue(),
            "Ljava/lang/Object;", null, null, null, null, Collections.singletonList(method));
    DexFile dexFile = new ImmutableDexFile(Opcodes.getDefault(), Collections.singletonList(classDef));

    ClassPath classPath = new ClassPath(new DexClassProvider(dexFile));
    MethodAnalyzer methodAnalyzer = new MethodAnalyzer(classPath, method, null, false);
    List<AnalyzedInstruction> analyzedInstructions = methodAnalyzer.getAnalyzedInstructions();

    // Without an odexed narrowing pass, v1 stays Ljava/lang/Object; on both branches.
    Assert.assertEquals("Ljava/lang/Object;",
            analyzedInstructions.get(2).getPreInstructionRegisterType(1).type.getType());
    Assert.assertEquals("Ljava/lang/Object;",
            analyzedInstructions.get(3).getPreInstructionRegisterType(1).type.getType());
}
/** Creates a time-windowed view of this grouped stream using the supplied window definition. */
@Override
public <W extends Window> TimeWindowedKStream<K, V> windowedBy(final Windows<W> windows) {
    return new TimeWindowedKStreamImpl<>(
            windows,
            builder,
            subTopologySourceNodes,
            name,
            keySerde,
            valueSerde,
            aggregateBuilder,
            graphNode);
}
@Test
public void shouldNotAcceptNullSessionWindowsReducingSessionWindows() {
    // A null window definition must be rejected eagerly.
    assertThrows(NullPointerException.class,
            () -> groupedStream.windowedBy((SessionWindows) null));
}
/** Returns a fresh output stream that writes through to this binary stream. */
@Override
public OutputStream getOutputStream() {
    return new RedissonOutputStream();
}
@Test
public void testWriteArray() throws IOException {
    RBinaryStream stream = redisson.getBinaryStream("test");
    OutputStream os = stream.getOutputStream();

    // Whatever is written through the stream must be readable back as-is.
    byte[] value = {1, 2, 3, 4, 5, 6};
    os.write(value);

    byte[] s = stream.get();
    assertThat(s).isEqualTo(value);
}
/**
 * Checks whether any registered remoting parser recognizes the given bean
 * as a remoting service.
 */
public boolean isService(Object bean, String beanName) {
    // A bean is a service as soon as one parser claims it.
    for (RemotingParser parser : allRemotingParsers) {
        if (parser.isService(bean, beanName)) {
            return true;
        }
    }
    return false;
}
@Test
public void testIsServiceFromObjectFail() {
    // A plain bean with no remoting annotations must not be treated as a service.
    SimpleBean remoteBean = new SimpleBean();
    assertFalse(remotingParser.isService(remoteBean, remoteBean.getClass().getName()));
}
Set<TaskId> assignedTasks() { final Set<TaskId> assignedActiveTaskIds = assignedActiveTasks.taskIds(); final Set<TaskId> assignedStandbyTaskIds = assignedStandbyTasks.taskIds(); // Since we're copying it, it's not strictly necessary to make it unmodifiable also. // I'm just trying to prevent subtle bugs if we write code that thinks it can update // the assignment by updating the returned set. return unmodifiableSet( union( () -> new HashSet<>(assignedActiveTaskIds.size() + assignedStandbyTaskIds.size()), assignedActiveTaskIds, assignedStandbyTaskIds ) ); }
@Test
public void shouldNotModifyAssignedView() {
    final ClientState clientState = new ClientState(1);
    final Set<TaskId> taskIds = clientState.assignedTasks();
    // Mutating the returned view must fail and must not alter the client state.
    assertThrows(UnsupportedOperationException.class, () -> taskIds.add(TASK_0_0));
    assertThat(clientState, hasActiveTasks(0));
    assertThat(clientState, hasStandbyTasks(0));
}
/**
 * Narrows the instance-list argument to the target instances selected by the
 * load balancer for the current request. Leaves the context untouched when no
 * service id can be resolved or the instance list is empty.
 */
@Override
public ExecuteContext before(ExecuteContext context) {
    String serviceId = getServiceId(context.getObject()).orElse(null);
    if (StringUtils.isBlank(serviceId)) {
        // Without a service id there is nothing to route.
        return context;
    }
    Object[] arguments = context.getArguments();
    List<Object> instances = (List<Object>) arguments[0];
    if (CollectionUtils.isEmpty(instances)) {
        return context;
    }
    RequestData requestData = ThreadLocalUtils.getRequestData();
    arguments[0] = loadBalancerService.getTargetInstances(serviceId, instances, requestData);
    return context;
}
@Test
public void testBeforeWithEmptyInstances() {
    arguments[0] = Collections.emptyList();
    ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));

    interceptor.before(context);

    // An empty instance list must pass through untouched.
    List<ServiceInstance> instances = (List<ServiceInstance>) context.getArguments()[0];
    Assert.assertNotNull(instances);
    Assert.assertEquals(0, instances.size());
}
public static Map<String, String> getSegmentUriToTarPathMap(URI outputDirURI, PushJobSpec pushSpec, String[] files) { Map<String, String> segmentUriToTarPathMap = new HashMap<>(); PathMatcher pushFilePathMatcher = null; if (pushSpec.getPushFileNamePattern() != null) { pushFilePathMatcher = FileSystems.getDefault().getPathMatcher(pushSpec.getPushFileNamePattern()); } for (String file : files) { if (pushFilePathMatcher != null) { if (!pushFilePathMatcher.matches(Paths.get(file))) { continue; } } URI uri = URI.create(file); if (uri.getPath().endsWith(Constants.METADATA_TAR_GZ_FILE_EXT)) { // Skip segment metadata tar gz files continue; } if (uri.getPath().endsWith(Constants.TAR_GZ_FILE_EXT)) { URI updatedURI = SegmentPushUtils.generateSegmentTarURI(outputDirURI, uri, pushSpec.getSegmentUriPrefix(), pushSpec.getSegmentUriSuffix()); segmentUriToTarPathMap.put(updatedURI.toString(), file); } } return segmentUriToTarPathMap; }
@Test
public void testGetSegmentUriToTarPathMap() throws IOException {
    URI outputDirURI = Files.createTempDirectory("test").toUri();
    String[] segmentFiles = new String[]{
        outputDirURI.resolve("segment.tar.gz").toString(),
        outputDirURI.resolve("stats_202201.tar.gz").toString(),
        outputDirURI.resolve("/2022/segment.tar.gz").toString(),
        outputDirURI.resolve("/2022/stats_202201.tar.gz").toString()
    };

    // No pattern: all four tar.gz files are included and mapped to themselves.
    PushJobSpec pushSpec = new PushJobSpec();
    Map<String, String> result = SegmentPushUtils.getSegmentUriToTarPathMap(outputDirURI, pushSpec, segmentFiles);
    assertEquals(result.size(), 4);
    for (String segmentFile : segmentFiles) {
        assertTrue(result.containsKey(segmentFile));
        assertEquals(result.get(segmentFile), segmentFile);
    }

    // Directory glob keeps only the files under /2022/.
    pushSpec.setPushFileNamePattern("glob:**/2022/*.tar.gz");
    result = SegmentPushUtils.getSegmentUriToTarPathMap(outputDirURI, pushSpec, segmentFiles);
    assertEquals(result.size(), 2);
    assertEquals(result.get(segmentFiles[2]), segmentFiles[2]);
    assertEquals(result.get(segmentFiles[3]), segmentFiles[3]);

    // Name glob keeps only the stats_* files.
    pushSpec.setPushFileNamePattern("glob:**/stats_*.tar.gz");
    result = SegmentPushUtils.getSegmentUriToTarPathMap(outputDirURI, pushSpec, segmentFiles);
    assertEquals(result.size(), 2);
    assertEquals(result.get(segmentFiles[1]), segmentFiles[1]);
    assertEquals(result.get(segmentFiles[3]), segmentFiles[3]);
}
/** Whether the SECURITY option bit is set on this handler's option mask. */
public boolean isSecurityEnabled() {
    return (this._options & SECURITY) != 0;
}
@Test
void defaultsToSecurityBeingDisabled() throws Exception {
    // A freshly constructed handler must not have the SECURITY bit set.
    assertThat(handler.isSecurityEnabled()).isFalse();
}
/** Delegates TRACE-level logging to the wrapped logger. */
@Override
public void trace(String msg) {
    logger.trace(msg);
}
@Test
void testTraceWithException() {
    Exception exception = new Exception();
    jobRunrDashboardLogger.trace("trace", exception);
    // The wrapper must forward both the message and the throwable unchanged.
    verify(slfLogger).trace("trace", exception);
}
/** Resolves the HA mode from configuration and builds the custom client HA services. */
private static ClientHighAvailabilityServices createCustomClientHAServices(Configuration config)
        throws FlinkException {
    final String haMode = config.get(HighAvailabilityOptions.HA_MODE);
    return createCustomClientHAServices(haMode, config);
}
@Test public void testCreateCustomClientHAServices() throws Exception { Configuration config = new Configuration(); ClientHighAvailabilityServices clientHAServices = TestingClientHAServices.createClientHAServices(); TestHAFactory.clientHAServices = clientHAServices; config.set(HighAvailabilityOptions.HA_MODE, TestHAFactory.class.getName()); // when ClientHighAvailabilityServices actualClientHAServices = HighAvailabilityServicesUtils.createClientHAService( config, NoOpFatalErrorHandler.INSTANCE); // then assertSame(clientHAServices, actualClientHAServices); }
public Map<COSObjectKey, COSBase> parseAllObjects() throws IOException { Map<COSObjectKey, COSBase> allObjects = new HashMap<>(); try { Map<Integer, Long> objectNumbers = privateReadObjectOffsets(); // count the number of object numbers eliminating double entries long numberOfObjNumbers = objectNumbers.values().stream().distinct().count(); // the usage of the index should be restricted to cases where more than one // object use the same object number. // there are malformed pdfs in the wild which would lead to false results if // pdfbox always relies on the index if available. In most cases the object number // is sufficient to choose the correct object boolean indexNeeded = objectNumbers.size() > numberOfObjNumbers; long currentPosition = source.getPosition(); if (firstObject > 0 && currentPosition < firstObject) { source.skip(firstObject - (int) currentPosition); } int index = 0; for (Entry<Integer, Long> entry : objectNumbers.entrySet()) { COSObjectKey objectKey = getObjectKey(entry.getValue(), 0); // skip object if the index doesn't match if (indexNeeded && objectKey.getStreamIndex() > -1 && objectKey.getStreamIndex() != index) { index++; continue; } int finalPosition = firstObject + entry.getKey(); currentPosition = source.getPosition(); if (finalPosition > 0 && currentPosition < finalPosition) { // jump to the offset of the object to be parsed source.skip(finalPosition - (int) currentPosition); } COSBase streamObject = parseDirObject(); if (streamObject != null) { streamObject.setDirect(false); } allObjects.put(objectKey, streamObject); index++; } } finally { source.close(); document = null; } return allObjects; }
@Test void testParseAllObjectsUseMalformedIndex() throws IOException { COSStream stream = new COSStream(); stream.setItem(COSName.N, COSInteger.THREE); stream.setItem(COSName.FIRST, COSInteger.get(13)); OutputStream outputStream = stream.createOutputStream(); outputStream.write("6 0 4 5 4 11 true false true".getBytes()); outputStream.close(); COSDocument cosDoc = new COSDocument(); Map<COSObjectKey, Long> xrefTable = cosDoc.getXrefTable(); // add an index for each object key which doesn't match with the index of the object stream // add two object keys only as the object stream uses one object number for two objects xrefTable.put(new COSObjectKey(6, 0, 10), -1L); xrefTable.put(new COSObjectKey(4, 0, 11), -1L); PDFObjectStreamParser objectStreamParser = new PDFObjectStreamParser(stream, cosDoc); // as the used object numbers aren't unique within the object the index of the obejct keys is used // All objects are dropped as the malformed index values don't match the index of the object within the stream Map<COSObjectKey, COSBase> objectNumbers = objectStreamParser.parseAllObjects(); assertEquals(0, objectNumbers.size()); }
/**
 * Reads a single byte by delegating to {@link #read(byte[])} through the
 * one-byte scratch buffer {@code z}.
 *
 * @return the unsigned byte value (0-255), or -1 at end of stream
 */
@Override
public int read() throws IOException {
    if (read(z) == -1) {
        return -1;
    }
    // Mask so byte values >= 0x80 are not sign-extended to negative ints.
    return 0xFF & z[0];
}
@Test
public void testUneven() throws Exception {
    final Configuration conf = new Configuration();
    Arrays.fill(loc, "");
    Arrays.fill(start, 0L);
    Arrays.fill(len, BLOCK);

    // Shift every other split to start mid-block, halving its length.
    final int B2 = BLOCK / 2;
    for (int i = 0; i < NFILES; i += 2) {
        start[i] += B2;
        len[i] -= B2;
    }

    final FileQueue q = new FileQueue(new CombineFileSplit(paths, start, len, loc), conf);
    final ByteArrayOutputStream out = fillVerif();
    final byte[] verif = out.toByteArray();
    final byte[] check = new byte[NFILES / 2 * BLOCK + NFILES / 2 * B2];

    // Reading twice must produce the same verified sequence both times.
    q.read(check, 0, verif.length);
    assertArrayEquals(verif, Arrays.copyOf(check, verif.length));
    q.read(check, 0, verif.length);
    assertArrayEquals(verif, Arrays.copyOf(check, verif.length));
}
/**
 * Returns the overlap of this version range with {@code other}, or
 * {@link Versions#NONE} when the two ranges are disjoint.
 *
 * @param other the range to intersect with
 * @return the intersection range, possibly {@code Versions.NONE}
 */
public Versions intersect(Versions other) {
    // The intersection spans the higher of the two lower bounds
    // to the lower of the two upper bounds.
    short newLowest = (short) Math.max(lowest, other.lowest);
    short newHighest = (short) Math.min(highest, other.highest);
    if (newLowest > newHighest) {
        return Versions.NONE;
    }
    return new Versions(newLowest, newHighest);
}
@Test
public void testIntersections() {
    // Overlapping ranges shrink to the common span.
    assertEquals(newVersions(2, 3), newVersions(1, 3).intersect(newVersions(2, 4)));
    assertEquals(newVersions(3, 3), newVersions(0, Short.MAX_VALUE).intersect(newVersions(3, 3)));
    // Disjoint ranges collapse to NONE, and NONE is absorbing.
    assertEquals(Versions.NONE, newVersions(9, Short.MAX_VALUE).intersect(newVersions(2, 8)));
    assertEquals(Versions.NONE, Versions.NONE.intersect(Versions.NONE));
}
/**
 * Builds the HTTP handler that reports the broker state as JSON.
 * While the broker is in recovery, the response additionally carries the
 * remaining logs/segments to recover; a missing metric yields 404.
 */
Handler getBrokerStateHandler() {
    return new AbstractHandler() {
        @Override
        public void handle(String s, Request baseRequest, HttpServletRequest request,
                HttpServletResponse response) throws IOException {
            response.setContentType("application/json");
            response.setCharacterEncoding("UTF-8");
            baseRequest.setHandled(true);

            if (brokerState == null) {
                // Metric not registered (yet): nothing to report.
                response.setStatus(HttpServletResponse.SC_NOT_FOUND);
                response.getWriter().print("Broker state metric not found");
                return;
            }

            Map<String, Object> brokerStateResponse = new HashMap<>();
            boolean inRecovery = (byte) brokerState.value() == BROKER_RECOVERY_STATE
                    && remainingLogsToRecover != null
                    && remainingSegmentsToRecover != null;
            if (inRecovery) {
                // Attach recovery progress alongside the broker state.
                Map<String, Object> recoveryState = new HashMap<>();
                recoveryState.put("remainingLogsToRecover", remainingLogsToRecover.value());
                recoveryState.put("remainingSegmentsToRecover", remainingSegmentsToRecover.value());
                brokerStateResponse.put("brokerState", brokerState.value());
                brokerStateResponse.put("recoveryState", recoveryState);
            } else {
                brokerStateResponse.put("brokerState", brokerState.value());
            }

            response.setStatus(HttpServletResponse.SC_OK);
            String json = new ObjectMapper().writeValueAsString(brokerStateResponse);
            response.getWriter().print(json);
        }
    };
}
@Test
public void testBrokerRecoveryState() throws Exception {
    // Broker in recovery state (2) with known remaining logs/segments.
    @SuppressWarnings({ "rawtypes" })
    final Gauge brokerState = mock(Gauge.class);
    when(brokerState.value()).thenReturn((byte) 2);
    @SuppressWarnings({ "rawtypes" })
    final Gauge remainingLogs = mock(Gauge.class);
    when(remainingLogs.value()).thenReturn((byte) 10);
    @SuppressWarnings({ "rawtypes" })
    final Gauge remainingSegments = mock(Gauge.class);
    when(remainingSegments.value()).thenReturn((byte) 100);

    KafkaAgent agent = new KafkaAgent(brokerState, remainingLogs, remainingSegments, null);
    context.setHandler(agent.getBrokerStateHandler());
    server.setHandler(context);
    server.start();

    HttpResponse<String> response = HttpClient.newBuilder()
            .build()
            .send(req, HttpResponse.BodyHandlers.ofString());

    assertThat(HttpServletResponse.SC_OK, is(response.statusCode()));
    String expectedResponse =
        "{\"brokerState\":2,\"recoveryState\":{\"remainingLogsToRecover\":10,\"remainingSegmentsToRecover\":100}}";
    assertThat(expectedResponse, is(response.body()));
}
/**
 * Evaluates the boosted ensemble on {@code data}, returning staged predictions:
 * row i holds each sample's prediction after the first i+1 trees.
 */
public double[][] test(DataFrame data) {
    DataFrame x = formula.x(data);
    int n = x.nrow();
    int ntrees = trees.length;
    double[][] prediction = new double[ntrees][n];

    for (int j = 0; j < n; j++) {
        Tuple xj = x.get(j);
        // Accumulate shrunken tree outputs on top of the intercept b.
        double base = b;
        for (int i = 0; i < ntrees; i++) {
            base += shrinkage * trees[i].predict(xj);
            prediction[i][j] = base;
        }
    }
    return prediction;
}
@Test
public void testAutoMPGQuantile() {
    // Median (0.5) quantile loss on AutoMPG; expected error 3.0979.
    test(Loss.quantile(0.5), "autoMPG", AutoMPG.formula, AutoMPG.data, 3.0979);
}
/**
 * Opens the configuration source at {@code path}, parses it and builds the
 * configuration object.
 *
 * @throws ConfigurationParsingException when the source is empty or malformed
 */
@Override
public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException {
    try (InputStream input = provider.open(requireNonNull(path))) {
        final JsonNode node = mapper.readTree(createParser(input));
        if (node == null) {
            // readTree returns null for an empty source; surface that as a parse error.
            throw ConfigurationParsingException
                .builder("Configuration at " + path + " must not be empty")
                .build(path);
        }
        return build(node, path);
    } catch (JsonParseException e) {
        throw ConfigurationParsingException
            .builder("Malformed " + formatName)
            .setCause(e)
            .setLocation(e.getLocation())
            .setDetail(e.getMessage())
            .build(path);
    }
}
@Test
void handleOverrideDefaultConfiguration() throws Exception {
    // Override defaults via "dw."-prefixed system properties.
    System.setProperty("dw.name", "Coda Hale Overridden");
    System.setProperty("dw.type", "coder,wizard,overridden");
    System.setProperty("dw.properties.settings.enabled", "true");
    System.setProperty("dw.servers[0].port", "8090");
    System.setProperty("dw.servers[2].port", "8092");
    try {
        final ExampleWithDefaults example =
            new YamlConfigurationFactory<>(ExampleWithDefaults.class, validator, Jackson.newObjectMapper(), "dw")
                .build();
        assertThat(example)
            .satisfies(eg -> assertThat(eg.name).isEqualTo("Coda Hale Overridden"))
            .satisfies(eg -> assertThat(eg.type)
                .hasSize(3)
                .element(2)
                .isEqualTo("overridden"))
            .satisfies(eg -> assertThat(eg.properties).containsEntry("settings.enabled", "true"))
            .satisfies(eg -> assertThat(eg.servers)
                .satisfies(servers -> assertThat(servers).element(0).extracting(ExampleServer::getPort).isEqualTo(8090))
                .satisfies(servers -> assertThat(servers).element(2).extracting(ExampleServer::getPort).isEqualTo(8092)));
    } finally {
        // Clear the overrides so they do not leak into other tests sharing the JVM.
        System.clearProperty("dw.name");
        System.clearProperty("dw.type");
        System.clearProperty("dw.properties.settings.enabled");
        System.clearProperty("dw.servers[0].port");
        System.clearProperty("dw.servers[2].port");
    }
}
public static Object construct(Object something) throws Exception { if (something instanceof String) { return Class.forName((String)something).getConstructor().newInstance(); } else if (something instanceof Map) { // keys are the class name, values are the parameters. for (Map.Entry<String, Object> entry : ((Map<String, Object>) something).entrySet()) { if (entry.getValue() instanceof Map) { return constructByNamedParams(Class.forName(entry.getKey()), (Map)entry.getValue()); } else if (entry.getValue() instanceof List) { return constructByParameterizedConstructor(Class.forName(entry.getKey()), (List)entry.getValue()); } } } return null; }
@Test
public void classWithoutMatchedConstructor_constructed_succeedsWhenDefault() throws Exception {
    // Build a definition: class name -> positional parameter list.
    Map<String, List<Map<String, Object>>> constructMap = new HashMap<>();
    List<Map<String, Object>> params = new ArrayList<>();
    params.add(Collections.singletonMap("java.lang.String", "Hello"));
    constructMap.put("com.networknt.service.GImpl", params);

    // Even without an exact parameter match, construction falls back and succeeds.
    Assert.assertNotNull(ServiceUtil.construct(constructMap));
}
/**
 * Returns the last address of the subnet containing {@code ip} as dotted-quad text.
 *
 * @param ip      an IPv4 address inside the subnet
 * @param maskBit the subnet mask length (e.g. 24)
 */
public static String getEndIpStr(String ip, int maskBit) {
    final long endIp = getEndIpLong(ip, maskBit);
    return longToIpv4(endIp);
}
@Test
public void getEndIpStrTest() {
    final String ip = "192.168.1.1";
    // /24 mask -> the subnet's last address is the .255 broadcast address.
    final int maskBitByMask = Ipv4Util.getMaskBitByMask("255.255.255.0");
    final String endIpStr = Ipv4Util.getEndIpStr(ip, maskBitByMask);
    assertEquals("192.168.1.255", endIpStr);
}
/**
 * Loads the tenant package by id and validates that it exists and is enabled.
 *
 * @return the valid tenant package
 */
@Override
public TenantPackageDO validTenantPackage(Long id) {
    TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id);
    if (tenantPackage == null) {
        throw exception(TENANT_PACKAGE_NOT_EXISTS);
    }
    // A disabled package must not be assigned to tenants.
    if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName());
    }
    return tenantPackage;
}
/** An enabled package passes validation and is returned unchanged. */
@Test
public void testValidTenantPackage_success() {
    // mock data
    TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    tenantPackageMapper.insert(dbTenantPackage);// @Sql: insert an existing row first
    // invoke
    TenantPackageDO result = tenantPackageService.validTenantPackage(dbTenantPackage.getId());
    // assert
    assertPojoEquals(dbTenantPackage, result);
}
/**
 * Renders every configuration value as a string-to-string map, suitable for writing
 * a native worker properties file. Keys are the property-name constants; values are
 * the current getter results rendered via {@code String.valueOf}.
 *
 * @return an immutable map of all known properties (immutable builder rejects duplicate keys)
 */
public Map<String, String> getAllProperties() {
    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    return builder.put(CONCURRENT_LIFESPANS_PER_TASK, String.valueOf(getConcurrentLifespansPerTask()))
            .put(ENABLE_SERIALIZED_PAGE_CHECKSUM, String.valueOf(isEnableSerializedPageChecksum()))
            .put(ENABLE_VELOX_EXPRESSION_LOGGING, String.valueOf(isEnableVeloxExpressionLogging()))
            .put(ENABLE_VELOX_TASK_LOGGING, String.valueOf(isEnableVeloxTaskLogging()))
            // HTTP server settings
            .put(HTTP_SERVER_HTTP_PORT, String.valueOf(getHttpServerPort()))
            .put(HTTP_SERVER_REUSE_PORT, String.valueOf(isHttpServerReusePort()))
            .put(HTTP_SERVER_BIND_TO_NODE_INTERNAL_ADDRESS_ONLY_ENABLED, String.valueOf(isHttpServerBindToNodeInternalAddressOnlyEnabled()))
            .put(REGISTER_TEST_FUNCTIONS, String.valueOf(isRegisterTestFunctions()))
            // HTTPS settings
            .put(HTTP_SERVER_HTTPS_PORT, String.valueOf(getHttpsServerPort()))
            .put(HTTP_SERVER_HTTPS_ENABLED, String.valueOf(isEnableHttpsCommunication()))
            .put(HTTPS_CIPHERS, String.valueOf(getHttpsCiphers()))
            .put(HTTPS_CERT_PATH, String.valueOf(getHttpsCertPath()))
            .put(HTTPS_KEY_PATH, String.valueOf(getHttpsKeyPath()))
            // thread-pool sizing multipliers
            .put(HTTP_SERVER_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getHttpServerNumIoThreadsHwMultiplier()))
            .put(EXCHANGE_HTTP_CLIENT_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getExchangeHttpClientNumIoThreadsHwMultiplier()))
            // caching
            .put(ASYNC_DATA_CACHE_ENABLED, String.valueOf(getAsyncDataCacheEnabled()))
            .put(ASYNC_CACHE_SSD_GB, String.valueOf(getAsyncCacheSsdGb()))
            .put(CONNECTOR_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getConnectorNumIoThreadsHwMultiplier()))
            .put(PRESTO_VERSION, getPrestoVersion())
            .put(SHUTDOWN_ONSET_SEC, String.valueOf(getShutdownOnsetSec()))
            // memory settings
            .put(SYSTEM_MEMORY_GB, String.valueOf(getSystemMemoryGb()))
            .put(QUERY_MEMORY_GB, String.valueOf(getQueryMemoryGb()))
            .put(USE_MMAP_ALLOCATOR, String.valueOf(getUseMmapAllocator()))
            .put(MEMORY_ARBITRATOR_KIND, String.valueOf(getMemoryArbitratorKind()))
            .put(MEMORY_ARBITRATOR_CAPACITY_GB, String.valueOf(getMemoryArbitratorCapacityGb()))
            .put(MEMORY_ARBITRATOR_RESERVED_CAPACITY_GB, String.valueOf(getMemoryArbitratorReservedCapacityGb()))
            .put(MEMORY_POOL_INIT_CAPACITY, String.valueOf(getMemoryPoolInitCapacity()))
            .put(MEMORY_POOL_RESERVED_CAPACITY, String.valueOf(getMemoryPoolReservedCapacity()))
            .put(MEMORY_POOL_TRANSFER_CAPACITY, String.valueOf(getMemoryPoolTransferCapacity()))
            .put(MEMORY_RECLAIM_WAIT_MS, String.valueOf(getMemoryReclaimWaitMs()))
            .put(SPILLER_SPILL_PATH, String.valueOf(getSpillerSpillPath()))
            .put(TASK_MAX_DRIVERS_PER_TASK, String.valueOf(getMaxDriversPerTask()))
            // NOTE(review): key says "enable" but the value is getOldTaskCleanupMs() —
            // presumably intentional (the native side parses a ms value); confirm.
            .put(ENABLE_OLD_TASK_CLEANUP, String.valueOf(getOldTaskCleanupMs()))
            .put(SHUFFLE_NAME, getShuffleName())
            .put(HTTP_SERVER_ACCESS_LOGS, String.valueOf(isEnableHttpServerAccessLog()))
            .put(CORE_ON_ALLOCATION_FAILURE_ENABLED, String.valueOf(isCoreOnAllocationFailureEnabled()))
            .build();
}
/** Checks defaults and explicit property mapping of {@code NativeExecutionConnectorConfig}. */
@Test
public void testNativeExecutionConnectorConfig() {
    // Test defaults
    assertRecordedDefaults(ConfigAssertions.recordDefaults(NativeExecutionConnectorConfig.class)
            .setCacheEnabled(false)
            .setMaxCacheSize(new DataSize(0, DataSize.Unit.MEGABYTE))
            .setConnectorName("hive"));
    // Test explicit property mapping. Also makes sure properties returned by getAllProperties() covers full property list.
    NativeExecutionConnectorConfig expected = new NativeExecutionConnectorConfig()
            .setConnectorName("custom")
            .setMaxCacheSize(new DataSize(32, DataSize.Unit.MEGABYTE))
            .setCacheEnabled(true);
    Map<String, String> properties = new java.util.HashMap<>(expected.getAllProperties());
    // Since the cache.max-cache-size requires to be size without the unit which to be compatible with the C++,
    // here we convert the size from Long type (in string format) back to DataSize for comparison
    properties.put("cache.max-cache-size", String.valueOf(new DataSize(Double.parseDouble(properties.get("cache.max-cache-size")), DataSize.Unit.MEGABYTE)));
    assertFullMapping(properties, expected);
}
/**
 * Parses a JCA signature string such as {@code "SHA256withRSA"} or
 * {@code "SHA256withRSA in P1363 format"} into a {@link Signature} model,
 * attaching the digest and (when present) the output format.
 *
 * @param str the JCA algorithm string, may be {@code null}
 * @param detectionLocation where the string was detected
 * @return the mapped signature, or empty if {@code str} is null or unmappable
 */
@Nonnull
@Override
public Optional<Signature> parse(
        @Nullable String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    // Case-fold only; do NOT trim. Every index computed on this string is applied
    // to `str` via substring(), so the two must have identical character positions.
    // (The previous toLowerCase().trim() shifted indices when str had leading
    // whitespace, producing wrong digest/signature substrings.)
    final String generalizedStr = str.toLowerCase();
    if (!generalizedStr.contains("with")) {
        // No "<digest>with<signature>" structure: map the whole string directly.
        return map(str, detectionLocation);
    }
    int hashEndPos = generalizedStr.indexOf("with");
    String digestStr = str.substring(0, hashEndPos);
    JcaMessageDigestMapper jcaMessageDigestMapper = new JcaMessageDigestMapper();
    final Optional<MessageDigest> messageDigestOptional =
            jcaMessageDigestMapper.parse(digestStr, detectionLocation);
    int encryptStartPos = hashEndPos + 4; // skip over "with"
    String signatureStr = str.substring(encryptStartPos);
    final String format;
    if (generalizedStr.contains("in") && generalizedStr.contains("format")) {
        // Optional "... in <format> format" suffix.
        // NOTE(review): contains("in") also matches "in" inside algorithm names
        // (e.g. "plain"); confirm whether a stricter " in " match is intended.
        int inStartPos = generalizedStr.indexOf("in");
        int inEndPos = inStartPos + 2;
        signatureStr = str.substring(encryptStartPos, inStartPos);
        format = str.substring(inEndPos);
    } else {
        format = null;
    }
    return map(signatureStr, detectionLocation)
            .map(
                    signature -> {
                        messageDigestOptional.ifPresent(signature::put);
                        if (format != null) {
                            signature.put(new OutputFormat(format, detectionLocation));
                        }
                        return signature;
                    });
}
/** "NONEwithRSA" maps to a Signature with neither a digest nor an output format. */
@Test
void NONEwithRSA() {
    DetectionLocation testDetectionLocation =
            new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
    JcaSignatureMapper jcaSignatureMapper = new JcaSignatureMapper();
    Optional<Signature> signatureOptional =
            jcaSignatureMapper.parse("NONEwithRSA", testDetectionLocation);
    assertThat(signatureOptional).isPresent();
    assertThat(signatureOptional.get().is(Signature.class)).isTrue();
    // "NONE" digest and no "in ... format" suffix: both attachments absent.
    assertThat(signatureOptional.get().getFormat()).isEmpty();
    assertThat(signatureOptional.get().getDigest()).isEmpty();
}
/**
 * Returns the V2 value state described by {@code stateProperties} for the current key.
 *
 * @param stateProperties descriptor identifying the state and its serializer
 * @param <T> state value type
 * @return the partitioned value state handle
 */
public <T> org.apache.flink.api.common.state.v2.ValueState<T> getValueState(
        org.apache.flink.runtime.state.v2.ValueStateDescriptor<T> stateProperties) {
    // Precondition checks (keyed context, descriptor initialization) happen here.
    final KeyedStateStoreV2 stateStore =
            checkPreconditionsAndGetKeyedStateStoreV2(stateProperties);
    return stateStore.getValueState(stateProperties);
}
/** Instantiating V2 value state must apply the execution config (Kryo registrations). */
@Test
void testV2ValueStateInstantiation() throws Exception {
    final ExecutionConfig config = new ExecutionConfig();
    SerializerConfig serializerConfig = config.getSerializerConfig();
    serializerConfig.registerKryoType(Path.class);
    final AtomicReference<Object> descriptorCapture = new AtomicReference<>();
    StreamingRuntimeContext context = createRuntimeContext(descriptorCapture, config);
    org.apache.flink.runtime.state.v2.ValueStateDescriptor<TaskInfo> descr =
            new org.apache.flink.runtime.state.v2.ValueStateDescriptor<>(
                    "name", TypeInformation.of(TaskInfo.class), serializerConfig);
    context.getValueState(descr);
    // The runtime context should have intercepted the descriptor on the way through.
    org.apache.flink.runtime.state.v2.ValueStateDescriptor<?> descrIntercepted =
            (org.apache.flink.runtime.state.v2.ValueStateDescriptor<?>) descriptorCapture.get();
    TypeSerializer<?> serializer = descrIntercepted.getSerializer();
    // check that the Path class is really registered, i.e., the execution config was applied
    assertThat(serializer).isInstanceOf(KryoSerializer.class);
    assertThat(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId())
            .isPositive();
}
/**
 * Reads the stream fully and caches its bytes under {@code key}.
 *
 * @param key  cache key
 * @param data non-null stream; consumed entirely
 * @throws IllegalStateException if reading the stream fails
 */
@Override
public void write(String key, InputStream data) {
    checkNotNull(data);
    try {
        final byte[] bytes = data.readAllBytes();
        write(key, bytes);
    } catch (IOException e) {
        throw new IllegalStateException("Failed to read sensor write cache data", e);
    }
}
/** Writing two streams under two keys stores both byte arrays. */
@Test
public void write_inputStream_adds_entries() {
    byte[] first = new byte[] {1, 2, 3};
    byte[] second = new byte[] {3, 4};
    writeCache.write("key", new ByteArrayInputStream(first));
    writeCache.write("key2", new ByteArrayInputStream(second));
    assertThatCacheContains(Map.of("key", first, "key2", second));
}
/**
 * Resolves the literal value of the insert expression at {@code index}.
 * Parameter markers are resolved against the bound parameter list; literal
 * segments yield their literal; anything else yields empty.
 *
 * @param index position in the value expressions
 * @return the resolved value, empty if absent or unresolvable
 */
public Optional<Object> getLiteralValue(final int index) {
    final ExpressionSegment expression = valueExpressions.get(index);
    if (expression instanceof ParameterMarkerExpressionSegment) {
        final int parameterIndex = getParameterIndex((ParameterMarkerExpressionSegment) expression);
        return Optional.ofNullable(parameters.get(parameterIndex));
    }
    if (expression instanceof LiteralExpressionSegment) {
        final Object literals = ((LiteralExpressionSegment) expression).getLiterals();
        return Optional.ofNullable(literals);
    }
    return Optional.empty();
}
/** A literal expression segment resolves to the very object it carries. */
@Test
void assertGetLiteralValueWhenLiteralExpressionSegment() {
    Object expected = new Object();
    Collection<ExpressionSegment> segments = makeLiteralExpressionSegment(expected);
    InsertValueContext context = new InsertValueContext(segments, Collections.emptyList(), 0);
    Optional<Object> actual = context.getLiteralValue(0);
    assertTrue(actual.isPresent());
    assertThat(actual.get(), is(expected));
}
/** Returns the i-th buffer, unwrapping it when it is stored as a {@link Component}. */
private ByteBuf buffer(int i) {
    final ByteBuf candidate = buffers[i];
    if (candidate instanceof Component) {
        return ((Component) candidate).buf;
    }
    return candidate;
}
/** Partial gathering-write behavior over two heap-allocated buffers. */
@Test
public void testGatheringWritesPartialHeap() throws Exception {
    testGatheringWritesPartial(buffer(), buffer());
}
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
/** CSAS/CTAS sinks must be marked as create-as-select targets in the metastore. */
@Test
public void shouldSetKsqlSinkForSinks() {
    // When:
    setupKsqlEngineWithSharedRuntimeEnabled();
    KsqlEngineTestUtil.execute(
        serviceContext,
        ksqlEngine,
        "create stream s as select * from orders;"
            + "create table t as select itemid, count(*) from orders group by itemid;",
        ksqlConfig,
        Collections.emptyMap()
    );
    // Then:
    assertThat(metaStore.getSource(SourceName.of("S")).isCasTarget(), is(true));
    assertThat(metaStore.getSource(SourceName.of("T")).isCasTarget(), is(true));
}
public RawErasureCoderFactory getCoderByName( String codecName, String coderName) { List<RawErasureCoderFactory> coders = getCoders(codecName); // find the RawErasureCoderFactory with the name of coderName for (RawErasureCoderFactory coder : coders) { if (coder.getCoderName().equals(coderName)) { return coder; } } return null; }
/** Every (codec, coder-name) pair resolves to the expected factory implementation. */
@Test
public void testGetCoderByName() {
    // RS codec: pure-Java and native variants.
    RawErasureCoderFactory coder = CodecRegistry.getInstance().
        getCoderByName(ErasureCodeConstants.RS_CODEC_NAME, RSRawErasureCoderFactory.CODER_NAME);
    assertTrue(coder instanceof RSRawErasureCoderFactory);
    coder = CodecRegistry.getInstance().getCoderByName(
        ErasureCodeConstants.RS_CODEC_NAME, NativeRSRawErasureCoderFactory.CODER_NAME);
    assertTrue(coder instanceof NativeRSRawErasureCoderFactory);
    // Legacy RS codec.
    coder = CodecRegistry.getInstance().getCoderByName(
        ErasureCodeConstants.RS_LEGACY_CODEC_NAME, RSLegacyRawErasureCoderFactory.CODER_NAME);
    assertTrue(coder instanceof RSLegacyRawErasureCoderFactory);
    // XOR codec: pure-Java and native variants.
    coder = CodecRegistry.getInstance().getCoderByName(
        ErasureCodeConstants.XOR_CODEC_NAME, XORRawErasureCoderFactory.CODER_NAME);
    assertTrue(coder instanceof XORRawErasureCoderFactory);
    coder = CodecRegistry.getInstance().getCoderByName(
        ErasureCodeConstants.XOR_CODEC_NAME, NativeXORRawErasureCoderFactory.CODER_NAME);
    assertTrue(coder instanceof NativeXORRawErasureCoderFactory);
}
/**
 * Computes and aggregates measures by visiting the full report component tree
 * with a formula-executing visitor built over the configured formulas.
 */
public void execute() {
    new PathAwareCrawler<>(
        FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas))
        .visit(treeRootHolder.getReportTreeRoot());
}
/** With no duplications registered, the duplicated-lines metric aggregates to zero everywhere. */
@Test
public void compute_and_aggregate_zero_duplicated_line_when_no_duplication() {
    underTest.execute();
    assertComputedAndAggregatedToZeroInt(DUPLICATED_LINES_KEY);
}
public static BuildInfo getBuildInfo() { if (Overrides.isEnabled()) { // never use cache when override is enabled -> we need to re-parse everything Overrides overrides = Overrides.fromProperties(); return getBuildInfoInternalVersion(overrides); } return BUILD_INFO_CACHE; }
/** Without overrides, the default build must not report the enterprise edition. */
@Test
public void testEdition_whenNotOverridden() {
    final BuildInfo info = BuildInfoProvider.getBuildInfo();
    assertFalse(info.isEnterprise());
}
/**
 * Not supported by this implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public int getEventType() {
    throw new UnsupportedOperationException();
}
/** Batch event data does not expose a single event type: the accessor must throw. */
@Test(expected = UnsupportedOperationException.class)
public void testGetEventType() {
    batchEventData.getEventType();
}
@Override public ValidationTaskResult validateImpl(Map<String, String> optionMap) { // Skip this test if NOSASL if (mConf.get(PropertyKey.SECURITY_AUTHENTICATION_TYPE) .equals(AuthType.NOSASL)) { return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(), String.format("Impersonation validation is skipped for NOSASL"), ""); } ValidationTaskResult loadConfig = loadHdfsConfig(); if (loadConfig.getState() != ValidationUtils.State.OK) { mAdvice.insert(0, "Validating the proxy user requires additional HDFS " + "configuration. "); return loadConfig.setAdvice(mAdvice.toString()); } // TODO(jiacheng): validate proxyuser.hosts for the cluster // Validate proxyuser config for the current Alluxio user try { String alluxioUser = getCurrentUser(); return validateProxyUsers(alluxioUser); } catch (UnauthenticatedException e) { mMsg.append(String.format("Failed to authenticate in Alluxio: ")); mMsg.append(ExceptionUtils.asPlainText(e)); mAdvice.append("Please fix the authentication issue."); return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(), mMsg.toString(), mAdvice.toString()); } }
/** A wildcard proxy-groups entry (and no proxy-users entry) must validate OK. */
@Test
public void wildcardProxyGroups() {
    String userName = System.getProperty("user.name");
    // Proxy groups configured but not users
    prepareHdfsConfFiles(ImmutableMap.of(
        String.format("hadoop.proxyuser.%s.groups", userName), "*"));
    HdfsProxyUserValidationTask task =
        new HdfsProxyUserValidationTask("hdfs://namenode:9000/alluxio", mConf);
    ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
    assertEquals(ValidationUtils.State.OK, result.getState());
}
/**
 * Resolves a class name to a {@code Class} object, recognizing the nine
 * primitive-type keywords (including {@code void}) that {@code Class.forName}
 * cannot load.
 *
 * @param className primitive keyword or fully-qualified class name (non-null)
 * @return the resolved class
 * @throws IllegalArgumentException if the class cannot be found
 */
public static <T> Class<T> toClass(String className) {
    // className.equals(...) deliberately dereferences className first so a null
    // argument fails fast, matching the NPE a switch on null would raise.
    if (className.equals("boolean")) {
        return cast(boolean.class);
    } else if (className.equals("byte")) {
        return cast(byte.class);
    } else if (className.equals("short")) {
        return cast(short.class);
    } else if (className.equals("int")) {
        return cast(int.class);
    } else if (className.equals("long")) {
        return cast(long.class);
    } else if (className.equals("float")) {
        return cast(float.class);
    } else if (className.equals("double")) {
        return cast(double.class);
    } else if (className.equals("char")) {
        return cast(char.class);
    } else if (className.equals("void")) {
        return cast(void.class);
    }
    try {
        return cast(loadClass(className));
    } catch (ClassNotFoundException ex) {
        throw new IllegalArgumentException("Class not found: " + className, ex);
    }
}
/** Every primitive keyword, a real class name, and a missing class are resolved correctly. */
@Test
void testToClass() {
    assertThat(ReflectionUtils.toClass("boolean")).isEqualTo(boolean.class);
    assertThat(ReflectionUtils.toClass("byte")).isEqualTo(byte.class);
    assertThat(ReflectionUtils.toClass("short")).isEqualTo(short.class);
    assertThat(ReflectionUtils.toClass("int")).isEqualTo(int.class);
    assertThat(ReflectionUtils.toClass("long")).isEqualTo(long.class);
    assertThat(ReflectionUtils.toClass("float")).isEqualTo(float.class);
    assertThat(ReflectionUtils.toClass("double")).isEqualTo(double.class);
    assertThat(ReflectionUtils.toClass("char")).isEqualTo(char.class);
    assertThat(ReflectionUtils.toClass("void")).isEqualTo(void.class);
    // Non-primitive names go through the class loader.
    assertThat(ReflectionUtils.toClass("java.lang.String")).isEqualTo(String.class);
    // Unknown classes are wrapped in IllegalArgumentException.
    assertThatThrownBy(() -> ReflectionUtils.toClass("class.that.does.not.exist")).isInstanceOf(IllegalArgumentException.class);
}
/**
 * Asks the package-material plugin to verify connectivity to a specific package,
 * serializing the package and repository configuration for the plugin's
 * resolved extension version and deserializing the plugin's verdict.
 *
 * @return the plugin-reported connection check result
 */
public Result checkConnectionToPackage(String pluginId, final com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration, final RepositoryConfiguration repositoryConfiguration) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_CHECK_PACKAGE_CONNECTION, new DefaultPluginInteractionCallback<>() {
        @Override
        public String requestBody(String resolvedExtensionVersion) {
            // Message format depends on the extension version the plugin speaks.
            return messageConverter(resolvedExtensionVersion).requestMessageForCheckConnectionToPackage(packageConfiguration, repositoryConfiguration);
        }

        @Override
        public Result onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            return messageConverter(resolvedExtensionVersion).responseMessageForCheckConnectionToPackage(responseBody);
        }
    });
}
/** The extension must send the expected request JSON and surface the plugin's success messages. */
@Test
public void shouldTalkToPluginToCheckPackageConnectionSuccessful() throws Exception {
    String expectedRequestBody = "{\"repository-configuration\":{\"key-one\":{\"value\":\"value-one\"},\"key-two\":{\"value\":\"value-two\"}}," +
            "\"package-configuration\":{\"key-three\":{\"value\":\"value-three\"},\"key-four\":{\"value\":\"value-four\"}}}";
    // NOTE(review): "messages=[...]" is not valid JSON; presumably the converter
    // under test tolerates it — confirm this fixture matches the real wire format.
    String expectedResponseBody = "{\"status\":\"success\",messages=[\"message-one\",\"message-two\"]}";
    when(pluginManager.isPluginOfType(PACKAGE_MATERIAL_EXTENSION, PLUGIN_ID)).thenReturn(true);
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(PACKAGE_MATERIAL_EXTENSION), requestArgumentCaptor.capture())).thenReturn(DefaultGoPluginApiResponse.success(expectedResponseBody));
    Result result = extension.checkConnectionToPackage(PLUGIN_ID, packageConfiguration, repositoryConfiguration);
    assertRequest(requestArgumentCaptor.getValue(), PACKAGE_MATERIAL_EXTENSION, "1.0", PackageRepositoryExtension.REQUEST_CHECK_PACKAGE_CONNECTION, expectedRequestBody);
    assertSuccessResult(result, List.of("message-one", "message-two"));
}
/**
 * Parses a file-distribution status JSON document and renders a human-readable
 * summary line (or lines, for in-progress status).
 *
 * @throws RuntimeException if the JSON is unparseable or the status is unknown
 */
String parseAndGenerateOutput(String json) {
    final JsonNode root;
    try {
        root = Jackson.mapper().readTree(json);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    final String status = root.get("status").asText();
    return switch (status) {
        case statusUnknown -> "File distribution status unknown: " + root.get("message").asText();
        case statusInProgress -> "File distribution in progress:\n" + inProgressOutput(root.get("hosts"));
        case statusFinished -> "File distribution finished";
        default -> throw new RuntimeException("Unknown status " + status);
    };
}
/** In-progress output lists one line per host with its state and progress/error detail. */
@Test
public void manyHostsVariousStates() {
    // Removed leftover System.out.println debug statement.
    String statusForTwoHosts = createStatusForTwoHosts();
    String output = client.parseAndGenerateOutput(statusForTwoHosts);
    assertEquals("File distribution in progress:\nlocalhost1: IN_PROGRESS (1 of 2 finished)\nlocalhost2: UNKNOWN (Connection timed out)", output);
}
/**
 * Extracts the value of the first occurrence of {@code parameterName} from the
 * URL's query string. Returns an empty string for a valueless key ({@code ?k}),
 * and SQL NULL for malformed URLs, missing query strings, or unmatched keys.
 */
@SqlNullable
@Description("extract query parameter from url")
@ScalarFunction
@LiteralParameters({"x", "y"})
@SqlType("varchar(x)")
public static Slice urlExtractParameter(@SqlType("varchar(x)") Slice url, @SqlType("varchar(y)") Slice parameterName) {
    URI uri = parseUrl(url);
    if ((uri == null) || (uri.getQuery() == null)) {
        return null;
    }

    Slice query = slice(uri.getQuery());
    String parameter = parameterName.toStringUtf8();
    // Split "k1=v1&k2=v2&k3" into individual "key[=value]" arguments.
    Iterable<String> queryArgs = QUERY_SPLITTER.split(query.toStringUtf8());

    for (String queryArg : queryArgs) {
        // Split each argument on the first '=' only, so "k=a=b=c" keeps "a=b=c" intact.
        Iterator<String> arg = ARG_SPLITTER.split(queryArg).iterator();
        if (arg.next().equals(parameter)) {
            if (arg.hasNext()) {
                return utf8Slice(arg.next());
            }
            // first matched key is empty
            return Slices.EMPTY_SLICE;
        }
    }

    // no key matched
    return null;
}
/** Covers valued keys, valueless keys, missing keys, duplicates (first wins), '=' in values, and bad URLs. */
@Test
public void testUrlExtractParameter() {
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1=v1&k2=v2&k3&k4#Ref1', 'k1')", createVarcharType(53), "v1");
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1=v1&k2=v2&k3&k4#Ref1', 'k2')", createVarcharType(53), "v2");
    // Valueless keys yield the empty string, not NULL.
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1=v1&k2=v2&k3&k4#Ref1', 'k3')", createVarcharType(53), "");
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1=v1&k2=v2&k3&k4#Ref1', 'k4')", createVarcharType(53), "");
    // Missing key yields NULL.
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1=v1&k2=v2&k3&k4#Ref1', 'k5')", createVarcharType(53), null);
    // Duplicated keys: first occurrence wins.
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1=v1&k1=v2&k1&k1#Ref1', 'k1')", createVarcharType(53), "v1");
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k1&k1=v1&k1&k1#Ref1', 'k1')", createVarcharType(50), "");
    // Only the first '=' separates key from value.
    assertFunction("url_extract_parameter('http://example.com/path1/p.php?k=a=b=c&x=y#Ref1', 'k')", createVarcharType(47), "a=b=c");
    // Unparseable URL yields NULL.
    assertFunction("url_extract_parameter('foo', 'k1')", createVarcharType(3), null);
}
/**
 * Lists all topics in {@code project}, following the paginated API until the
 * next-page token is exhausted.
 *
 * @return all topic paths; an immutable empty list when the project has none
 * @throws IOException on API failure
 */
@Override
public List<TopicPath> listTopics(ProjectPath project) throws IOException {
    Topics.List request = pubsub.projects().topics().list(project.getPath());
    ListTopicsResponse response = request.execute();
    if (response.getTopics() == null || response.getTopics().isEmpty()) {
        return ImmutableList.of();
    }
    List<TopicPath> topics = new ArrayList<>(response.getTopics().size());
    while (true) {
        // Guard every page, not just the first: a subsequent page may legally
        // carry a null topic list (previously this dereferenced it and threw NPE).
        if (response.getTopics() != null) {
            for (Topic topic : response.getTopics()) {
                topics.add(topicPathFromPath(topic.getName()));
            }
        }
        if (Strings.isNullOrEmpty(response.getNextPageToken())) {
            break;
        }
        request.setPageToken(response.getNextPageToken());
        response = request.execute();
    }
    return topics;
}
/** Pagination: two pages linked by a next-page token yield two topics. */
@Test
public void listTopics() throws Exception {
    // First page carries a next-page token; second page is final.
    ListTopicsResponse expectedResponse1 = new ListTopicsResponse();
    expectedResponse1.setTopics(Collections.singletonList(buildTopic(1)));
    expectedResponse1.setNextPageToken("AVgJH3Z7aHxiDBs");
    ListTopicsResponse expectedResponse2 = new ListTopicsResponse();
    expectedResponse2.setTopics(Collections.singletonList(buildTopic(2)));
    Topics.List request = mockPubsub.projects().topics().list(PROJECT.getPath());
    when((Object) request.execute()).thenReturn(expectedResponse1, expectedResponse2);
    List<TopicPath> topicPaths = client.listTopics(PROJECT);
    assertEquals(2, topicPaths.size());
}
/**
 * Removes the package repository registered under {@code id}.
 *
 * @throws RuntimeException if no repository with that id exists
 */
public void removePackageRepository(String id) {
    final PackageRepository repository = this.find(id);
    if (repository == null) {
        throw new RuntimeException(String.format("Could not find repository with id '%s'", id));
    }
    this.remove(repository);
}
/** Removing an unknown repository id must fail with a descriptive message. */
@Test
void shouldReturnNullExceptionWhenRepoIdIsNotFound() throws Exception {
    PackageRepositories packageRepositories = new PackageRepositories();
    try {
        packageRepositories.removePackageRepository("repo1");
        fail("This should have thrown an exception");
    } catch (Exception e) {
        assertThat(e.getMessage()).isEqualTo(String.format("Could not find repository with id '%s'", "repo1"));
    }
}
/**
 * Loads a tenant by primary key.
 *
 * @param id tenant id
 * @return the tenant, or {@code null} if not found
 */
@Override
public TenantDO getTenant(Long id) {
    final TenantDO tenant = tenantMapper.selectById(id);
    return tenant;
}
/** A persisted tenant is retrievable by id and equal field-for-field. */
@Test
public void testGetTenant() {
    // mock data
    TenantDO dbTenant = randomPojo(TenantDO.class);
    tenantMapper.insert(dbTenant);// @Sql: insert an existing row first
    // prepare arguments
    Long id = dbTenant.getId();
    // invoke
    TenantDO result = tenantService.getTenant(id);
    // assert it exists
    assertPojoEquals(result, dbTenant);
}
/**
 * Builds an IPv6 prefix from a raw address byte array and a prefix length.
 *
 * @param address      16-byte IPv6 address
 * @param prefixLength prefix length in bits
 * @return the IPv6 prefix
 */
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
    final Ip6Address ip = Ip6Address.valueOf(address);
    return new Ip6Prefix(ip, prefixLength);
}
/** Passing a null Ip6Address must raise NullPointerException. */
@Test(expected = NullPointerException.class)
public void testInvalidValueOfNullAddress() {
    // The cast selects the Ip6Address overload; the previous dead local
    // variables (ipAddress/ipPrefix) existed only for that disambiguation.
    Ip6Prefix.valueOf((Ip6Address) null, 24);
}
/**
 * Starts the receiver for {@code s}, hopping onto the channel's event loop
 * when the caller is not already on it.
 */
@Override
public void subscribe(CoreSubscriber<? super Object> s) {
    final Runnable start = () -> startReceiver(s);
    if (eventLoop.inEventLoop()) {
        start.run();
    } else {
        eventLoop.execute(start);
    }
}
/** Racing dispose() against subscribe() must always terminate the subscriber (no hang). */
@Test
void disposeAndSubscribeRaceTest() {
    // Repeat to give the race a chance to interleave both orders.
    for (int i = 0; i < 100; i++) {
        ChannelOperations<NettyInbound, NettyOutbound> operations =
                new ChannelOperations<>(EmbeddedChannel::new, (connection, newState) -> {
                });
        FluxReceive receive = new FluxReceive(operations);
        TestSubscriber<Object> subscriber = TestSubscriber.create();
        RaceTestUtils.race(receive::dispose, () -> receive.subscribe(subscriber));
        // Either order must complete the subscriber within the timeout.
        subscriber.block(Duration.ofSeconds(5));
    }
}
/**
 * Joins the channel registered under {@code name}, looking up its configuration
 * (which may carry a channel key) and delegating to the config-based overload.
 */
public void joinChannel(String name) {
    joinChannel(configuration.findChannel(name));
}
/** Joining by name must pass the channel's configured key to the connection. */
@Test
public void doJoinChannelTestKey() {
    endpoint.joinChannel("#chan2");
    verify(connection).doJoin("#chan2", "chan2key");
}
/**
 * Starts the GitLab OAuth flow: generates a CSRF state token, builds the
 * authorization URL, and redirects the user to it.
 *
 * @throws IllegalStateException if closing the OAuth service fails
 */
@Override
public void init(InitContext context) {
    final String csrfState = context.generateCsrfState();
    try (OAuth20Service scribe = scribeFactory.newScribe(gitLabSettings, context.getCallbackUrl(), scribeApi)) {
        context.redirectTo(scribe.getAuthorizationUrl(csrfState));
    } catch (IOException e) {
        throw new IllegalStateException(e);
    }
}
/** Failures while constructing the OAuth service must propagate unchanged from init(). */
@Test
public void init_whenErrorWhileBuildingScribe_shouldReThrow() {
    IllegalStateException exception = new IllegalStateException("GitLab authentication is disabled");
    when(scribeFactory.newScribe(any(), any(), any())).thenThrow(exception);
    when(initContext.getCallbackUrl()).thenReturn("http://server/callback");
    // isEqualTo: the very same exception instance, not a wrapped copy.
    assertThatIllegalStateException()
        .isThrownBy(() -> gitLabIdentityProvider.init(initContext))
        .isEqualTo(exception);
}
/**
 * Re-primes this iterator with a fresh batch of records, resetting the read
 * position to the start of the batch.
 *
 * @param records          the new batch
 * @param num              number of valid entries in {@code records}
 * @param offset           offset recorded for position tracking
 * @param skipCountOfFirst records to skip for the first emitted element
 */
public void set(
        final E[] records, final int num, final long offset, final long skipCountOfFirst) {
    // Reset cursor first, then swap in the batch (assignments are independent).
    this.pos = 0;
    this.records = records;
    this.num = num;
    this.recordAndPosition.set(null, offset, skipCountOfFirst);
}
/** Releasing a batch must invoke the recycler callback exactly as configured. */
@Test
void testRecycler() {
    final AtomicBoolean recycled = new AtomicBoolean();
    final ArrayResultIterator<Object> iter = new ArrayResultIterator<>(() -> recycled.set(true));
    iter.releaseBatch();
    assertThat(recycled.get()).isTrue();
}
/** Human-readable step description shown in computation-step reporting. */
@Override
public String getDescription() {
    return "Checks executed before computation of measures";
}
/** The step must expose a non-empty description. */
@Test
public void test_getDescription() {
    assertThat(newStep().getDescription()).isNotEmpty();
}
/**
 * Removes the network with the given id from the store.
 *
 * @param networkId non-null, non-empty network id
 * @throws IllegalArgumentException if {@code networkId} is null or empty
 */
@Override
public void removeNetwork(String networkId) {
    checkArgument(!Strings.isNullOrEmpty(networkId), ERR_NULL_NETWORK_ID);
    synchronized (this) {
        // NOTE(review): the in-use check below is deliberately disabled —
        // removal currently proceeds even for networks in use; confirm intent.
        // if (isNetworkInUse(networkId)) {
        //     final String error = String.format(MSG_NETWORK, networkId, ERR_IN_USE);
        //     throw new IllegalStateException(error);
        // }
        KubevirtNetwork network = networkStore.removeNetwork(networkId);
        if (network != null) {
            log.info(String.format(MSG_NETWORK, network.name(), MSG_REMOVED));
        }
    }
}
/** A null network id must be rejected up front. */
@Test(expected = IllegalArgumentException.class)
public void testRemoveNetworkWithNull() {
    target.removeNetwork(null);
}
/**
 * Returns the absolute path of the app's cache directory, or the empty string
 * when no Android context is available.
 */
public String getCacheDirectoryPath() {
    if (this.context == null) {
        return "";
    }
    return absPath(this.context.getCacheDir());
}
/** With a context present, the cache path must end with the standard "/cache" segment. */
@Test
public void getCacheDirectoryPathIsNotEmpty() {
    assertThat(contextUtil.getCacheDirectoryPath(), endsWith("/cache"));
}
@Override public boolean archive(String gcsUrl, byte[] data) { BlobInfo blobInfo = parseBlobInfo(gcsUrl); if (data.length <= options.chunkUploadThresholdInBytes) { // Create the blob in one request. logger.atInfo().log("Archiving data to GCS at '%s' in one request.", gcsUrl); storage.create(blobInfo, data); return true; } // When content is large (1MB or more) it is recommended to write it in chunks via the blob's // channel writer. logger.atInfo().log( "Content is larger than threshold, archiving data to GCS at '%s' in chunks.", gcsUrl); try (WriteChannel writer = storage.writer(blobInfo)) { for (int chunkOffset = 0; chunkOffset < data.length; chunkOffset += options.chunkSizeInBytes) { int chunkSize = Math.min(data.length - chunkOffset, options.chunkSizeInBytes); writer.write(ByteBuffer.wrap(data, chunkOffset, chunkSize)); } return true; } catch (IOException e) { logger.atSevere().withCause(e).log("Unable to archving data to GCS at '%s'.", gcsUrl); return false; } }
/** Payloads above the threshold must be written in chunkSizeInBytes slices via the channel writer. */
@Test
public void archive_withLargeSizeBlob_createsBlobWithWriter() throws IOException {
    // 20 bytes with a chunk size of 8 -> three writes: 8 + 8 + 4.
    options.chunkSizeInBytes = 8;
    options.chunkUploadThresholdInBytes = 16;
    doReturn(mockWriter)
        .when(mockStorage)
        .writer(eq(BlobInfo.newBuilder(BUCKET_ID, OBJECT_ID).build()));
    GoogleCloudStorageArchiver archiver = archiverFactory.create(mockStorage);
    byte[] dataToArchive = newPreFilledByteArray(20);
    int numOfChunks = (int) Math.ceil((double) dataToArchive.length / options.chunkSizeInBytes);
    boolean succeeded = archiver.archive(buildGcsUrl(BUCKET_ID, OBJECT_ID), dataToArchive);
    assertThat(succeeded).isTrue();
    verify(mockWriter, times(numOfChunks)).write(byteBufferCaptor.capture());
    assertThat(byteBufferCaptor.getAllValues())
        .containsExactly(
            ByteBuffer.wrap(dataToArchive, 0, 8),
            ByteBuffer.wrap(dataToArchive, 8, 8),
            ByteBuffer.wrap(dataToArchive, 16, 4));
}
/**
 * Parses runtime options from a property map by delegating to the
 * function-based overload.
 */
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
    return parse(key -> properties.get(key));
}
/** A comma-separated plugin property must yield the plugins in declared order. */
@Test
void should_parse_plugin() {
    properties.put(Constants.PLUGIN_PROPERTY_NAME, "message:target/cucumber.ndjson, html:target/cucumber.html");
    RuntimeOptions options = cucumberPropertiesParser.parse(properties).build();
    assertThat(options.plugins().get(0).pluginString(), equalTo("message:target/cucumber.ndjson"));
    assertThat(options.plugins().get(1).pluginString(), equalTo("html:target/cucumber.html"));
}
/**
 * Validates that this column may be altered into {@code other} by a schema change.
 * Generated destination columns are always accepted; otherwise the destination
 * must keep a compatible type, aggregation, nullability, default value, and
 * string length.
 *
 * @param other the proposed destination column definition
 * @throws DdlException describing the first violated rule
 */
public void checkSchemaChangeAllowed(Column other) throws DdlException {
    // Generated columns derive their value; no further compatibility checks apply.
    if (other.isGeneratedColumn()) {
        return;
    }
    if (Strings.isNullOrEmpty(other.name)) {
        throw new DdlException("Dest column name is empty");
    }

    if (!ColumnType.isSchemaChangeAllowed(type, other.type)) {
        throw new DdlException("Can not change " + getType() + " to " + other.getType());
    }

    if (this.aggregationType != other.aggregationType) {
        throw new DdlException("Can not change aggregation type");
    }

    // Tightening nullable -> non-nullable could invalidate existing rows.
    if (this.isAllowNull && !other.isAllowNull) {
        throw new DdlException("Can not change from nullable to non-nullable");
    }

    // Adding a default value to a column without a default value is not supported
    if (!this.isSameDefaultValue(other)) {
        throw new DdlException(CAN_NOT_CHANGE_DEFAULT_VALUE);
    }

    // String-typed columns may widen but never shrink their declared length.
    if ((getPrimitiveType() == PrimitiveType.VARCHAR && other.getPrimitiveType() == PrimitiveType.VARCHAR)
            || (getPrimitiveType() == PrimitiveType.CHAR && other.getPrimitiveType() == PrimitiveType.VARCHAR)
            || (getPrimitiveType() == PrimitiveType.CHAR && other.getPrimitiveType() == PrimitiveType.CHAR)) {
        if (getStrLen() > other.getStrLen()) {
            throw new DdlException("Cannot shorten string length");
        }
    }

    // JSON converted to a char-family type needs enough room for the JSON payload.
    if (getPrimitiveType().isJsonType() && other.getPrimitiveType().isCharFamily()) {
        if (other.getStrLen() <= getPrimitiveType().getTypeSize()) {
            throw new DdlException("JSON needs minimum length of " + getPrimitiveType().getTypeSize());
        }
    }
}
/** Every transition that adds, removes, or changes a column default value must be rejected. */
@Test
public void testSchemaChangeAllowedDefaultValue() {
    // literal default -> no default: rejected
    try {
        Column oldColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
        Column newColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                NOT_SET, "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
    // CURRENT_TIMESTAMP default -> no default: rejected
    try {
        Column oldColumn = new Column("dt", ScalarType.createType(PrimitiveType.DATETIME), true, null, false,
                CURRENT_TIMESTAMP_VALUE, "");
        Column newColumn = new Column("dt", ScalarType.createType(PrimitiveType.DATETIME), true, null, false,
                NOT_SET, "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
    // no default -> literal default: rejected
    try {
        Column oldColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                NOT_SET, "");
        Column newColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
    // no default -> CURRENT_TIMESTAMP default: rejected
    try {
        Column oldColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                NOT_SET, "");
        Column newColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                CURRENT_TIMESTAMP_VALUE, "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
    // literal default -> CURRENT_TIMESTAMP default: rejected
    try {
        Column oldColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
        Column newColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                CURRENT_TIMESTAMP_VALUE, "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
    // literal default "0" -> literal default "1": rejected
    try {
        Column oldColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
        Column newColumn = new Column("user", ScalarType.createType(PrimitiveType.INT), true, null, false,
                new ColumnDef.DefaultValueDef(true, new StringLiteral("1")), "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
    // CURRENT_TIMESTAMP default -> literal default: rejected
    try {
        Column oldColumn = new Column("dt", ScalarType.createType(PrimitiveType.DATETIME), true, null, false,
                CURRENT_TIMESTAMP_VALUE, "");
        Column newColumn = new Column("dt", ScalarType.createType(PrimitiveType.DATETIME), true, null, false,
                new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
        oldColumn.checkSchemaChangeAllowed(newColumn);
        Assert.fail("No exception throws.");
    } catch (DdlException ex) {
    }
}
@Override public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Object options, final PasswordCallback callback) throws BackgroundException { try { final Credentials password = callback.prompt(session.getHost(), LocaleFactory.localizedString("Passphrase", "Cryptomator"), MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()), new LoginOptions().anonymous(true).keychain(false).icon(session.getHost().getProtocol().disk())); return new DescriptiveUrl(URI.create(new BundlesApi(new BrickApiClient(session)) .postBundles(new BundlesBody().password(password.isPasswordAuthentication() ? password.getPassword() : null).paths(Collections.singletonList( StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER))))).getUrl()), DescriptiveUrl.Type.signed); } catch(ApiException e) { throw new BrickExceptionMappingService().map(e); } }
@Test public void toDownloadUrl() throws Exception { final Path directory = new BrickDirectoryFeature(session).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new BrickTouchFeature(session).touch( new Path(directory, String.format("%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus()); assertNotNull(new BrickShareFeature(session).toDownloadUrl(test, Share.Sharee.world, null, new DisabledPasswordCallback()).getUrl()); new BrickDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledPasswordCallback(), new Delete.DisabledCallback()); }
public Type getType() { return token.getType(); }
@Test public void testTypeDescriptorGeneric() throws Exception { TypeDescriptor<List<String>> descriptor = new TypeDescriptor<List<String>>() {}; TypeToken<List<String>> token = new TypeToken<List<String>>() {}; assertEquals(token.getType(), descriptor.getType()); }
@Override public byte[] echo(byte[] message) { return read(null, ByteArrayCodec.INSTANCE, ECHO, message); }
@Test public void testEcho() { assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes()); }
public void writeBytes(byte[] value) { // Determine the length of the encoded array int encodedLength = 2; // for separator for (byte b : value) { if ((b == ESCAPE1) || (b == ESCAPE2)) { encodedLength += 2; } else { encodedLength++; } } byte[] encodedArray = new byte[encodedLength]; int copyStart = 0; int outIndex = 0; for (int i = 0; i < value.length; i++) { byte b = value[i]; if (b == ESCAPE1) { System.arraycopy(value, copyStart, encodedArray, outIndex, i - copyStart); outIndex += i - copyStart; encodedArray[outIndex++] = ESCAPE1; encodedArray[outIndex++] = NULL_CHARACTER; copyStart = i + 1; } else if (b == ESCAPE2) { System.arraycopy(value, copyStart, encodedArray, outIndex, i - copyStart); outIndex += i - copyStart; encodedArray[outIndex++] = ESCAPE2; encodedArray[outIndex++] = FF_CHARACTER; copyStart = i + 1; } } if (copyStart < value.length) { System.arraycopy(value, copyStart, encodedArray, outIndex, value.length - copyStart); outIndex += value.length - copyStart; } encodedArray[outIndex++] = ESCAPE1; encodedArray[outIndex] = SEPARATOR; encodedArrays.add(encodedArray); }
@Test public void testWriteBytes() { byte[] first = {'a', 'b', 'c'}; byte[] second = {'d', 'e', 'f'}; byte[] last = {'x', 'y', 'z'}; OrderedCode orderedCode = new OrderedCode(); orderedCode.writeBytes(first); byte[] firstEncoded = orderedCode.getEncodedBytes(); assertArrayEquals(orderedCode.readBytes(), first); orderedCode.writeBytes(first); orderedCode.writeBytes(second); orderedCode.writeBytes(last); byte[] allEncoded = orderedCode.getEncodedBytes(); assertArrayEquals(orderedCode.readBytes(), first); assertArrayEquals(orderedCode.readBytes(), second); assertArrayEquals(orderedCode.readBytes(), last); orderedCode = new OrderedCode(firstEncoded); orderedCode.writeBytes(second); orderedCode.writeBytes(last); assertArrayEquals(orderedCode.getEncodedBytes(), allEncoded); assertArrayEquals(orderedCode.readBytes(), first); assertArrayEquals(orderedCode.readBytes(), second); assertArrayEquals(orderedCode.readBytes(), last); orderedCode = new OrderedCode(allEncoded); assertArrayEquals(orderedCode.readBytes(), first); assertArrayEquals(orderedCode.readBytes(), second); assertArrayEquals(orderedCode.readBytes(), last); }
public Matrix tm(Matrix B) { if (m != B.m) { throw new IllegalArgumentException(String.format("Matrix multiplication A' * B: %d x %d vs %d x %d", m, n, B.m, B.n)); } Matrix C = new Matrix(n, B.n); C.mm(TRANSPOSE, this, NO_TRANSPOSE, B); return C; }
@Test public void testTm() { System.out.println("tm"); double[][] A = { {4.0, 1.2, 0.8}, {1.2, 9.0, 1.2}, {0.8, 1.2, 16.0} }; double[] B = {-4.0, 1.0, -3.0}; double[] C = {-1.0505, 0.2719, -0.1554}; Matrix a = Matrix.of(A).inverse(); Matrix b = Matrix.column(B); assertTrue(MathEx.equals((b.tm(a)).toArray()[0], C, 1E-4)); assertTrue(MathEx.equals((b.transpose().mm(a)).toArray()[0], C, 1E-4)); assertTrue(MathEx.equals((b.transpose(false).mm(a)).toArray()[0], C, 1E-4)); }
@Override public Path copy(final Path source, final Path copy, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException { try { final String target = new DefaultUrlProvider(session.getHost()).toUrl(copy).find(DescriptiveUrl.Type.provider).getUrl(); if(session.getFeature(Lock.class) != null && status.getLockId() != null) { // Indicate that the client has knowledge of that state token session.getClient().copy(new DAVPathEncoder().encode(source), target, status.isExists(), Collections.singletonMap(HttpHeaders.IF, String.format("(<%s>)", status.getLockId()))); } else { session.getClient().copy(new DAVPathEncoder().encode(source), target, status.isExists()); } listener.sent(status.getLength()); return copy.withAttributes(source.attributes()); } catch(SardineException e) { throw new DAVExceptionMappingService().map("Cannot copy {0}", e, source); } catch(IOException e) { throw new HttpExceptionMappingService().map(e, source); } }
@Test public void testCopyFile() throws Exception { final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); new DAVTouchFeature(session).touch(test, new TransferStatus()); final Path copy = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); new DAVCopyFeature(session).copy(test, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener()); assertEquals(new DAVAttributesFinderFeature(session).find(test), new DAVAttributesFinderFeature(session).find(copy)); assertTrue(new DAVFindFeature(session).find(test)); assertTrue(new DAVFindFeature(session).find(copy)); new DAVDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); new DAVDeleteFeature(session).delete(Collections.<Path>singletonList(copy), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
Set<String> getRetry() { return retry; }
@Test public void determineRetryWhenSetToGeneric() { Athena2QueryHelper helper = athena2QueryHelperWithRetry("generic"); assertEquals(new HashSet<>(Collections.singletonList("generic")), helper.getRetry()); }
public static CompletableFuture<Position> asyncGetLastValidPosition(final ManagedLedgerImpl ledger, final Predicate<Entry> predicate, final Position startPosition) { CompletableFuture<Position> future = new CompletableFuture<>(); internalAsyncReverseFindPositionOneByOne(ledger, predicate, startPosition, future); return future; }
@Test public void testGetLastValidPosition() throws Exception { final int maxEntriesPerLedger = 5; ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); managedLedgerConfig.setMaxEntriesPerLedger(maxEntriesPerLedger); managedLedgerConfig.setRetentionSizeInMB(10); managedLedgerConfig.setRetentionTime(5, TimeUnit.MINUTES); ManagedLedger ledger = factory.open("testReverseFindPositionOneByOne", managedLedgerConfig); String matchEntry = "match-entry"; String noMatchEntry = "nomatch-entry"; Predicate<Entry> predicate = entry -> { String entryValue = entry.getDataBuffer().toString(UTF_8); return matchEntry.equals(entryValue); }; // New ledger will return the last position, regardless of whether the conditions are met or not. Position position = ManagedLedgerImplUtils.asyncGetLastValidPosition((ManagedLedgerImpl) ledger, predicate, ledger.getLastConfirmedEntry()).get(); assertEquals(ledger.getLastConfirmedEntry(), position); for (int i = 0; i < maxEntriesPerLedger - 1; i++) { ledger.addEntry(matchEntry.getBytes(StandardCharsets.UTF_8)); } Position lastMatchPosition = ledger.addEntry(matchEntry.getBytes(StandardCharsets.UTF_8)); for (int i = 0; i < maxEntriesPerLedger; i++) { ledger.addEntry(noMatchEntry.getBytes(StandardCharsets.UTF_8)); } // Returns last position of entry is "match-entry" position = ManagedLedgerImplUtils.asyncGetLastValidPosition((ManagedLedgerImpl) ledger, predicate, ledger.getLastConfirmedEntry()).get(); assertEquals(position, lastMatchPosition); ledger.close(); }
public static boolean fullyDelete(final File dir) { return fullyDelete(dir, false); }
@Test (timeout = 30000) public void testFailFullyDelete() throws IOException { // Windows Dir.setWritable(false) does not work for directories assumeNotWindows(); LOG.info("Running test to verify failure of fullyDelete()"); setupDirsAndNonWritablePermissions(); boolean ret = FileUtil.fullyDelete(new MyFile(del)); validateAndSetWritablePermissions(true, ret); }
public JetConfig getJetConfig() { return jetConfig; }
@Test public void when_instanceIsCreatedWithOverriddenDefaultWildcardConfiguration_then_defaultConfigurationIsNotChanged() { Config config = new Config(); DataPersistenceConfig dataPersistenceConfig = new DataPersistenceConfig(); dataPersistenceConfig.setEnabled(true); config.addMapConfig(getMapConfig("*", dataPersistenceConfig)); config.getJetConfig().setEnabled(true); HazelcastInstance instance = createHazelcastInstance(config); MapConfig otherMapConfig = ((MapProxyImpl) instance.getMap("otherMap")).getMapConfig(); assertTrue(otherMapConfig.getDataPersistenceConfig().isEnabled()); assertEquals(DEFAULT_BACKUP_COUNT, otherMapConfig.getBackupCount()); }
@Override public String buildAuthRequestUrl(ServerConfiguration serverConfig, RegisteredClient clientConfig, String redirectUri, String nonce, String state, Map<String, String> options, String loginHint) { // create our signed JWT for the request object JWTClaimsSet.Builder claims = new JWTClaimsSet.Builder(); //set parameters to JwtClaims claims.claim("response_type", "code"); claims.claim("client_id", clientConfig.getClientId()); claims.claim("scope", Joiner.on(" ").join(clientConfig.getScope())); // build our redirect URI claims.claim("redirect_uri", redirectUri); // this comes back in the id token claims.claim("nonce", nonce); // this comes back in the auth request return claims.claim("state", state); // Optional parameters for (Entry<String, String> option : options.entrySet()) { claims.claim(option.getKey(), option.getValue()); } // if there's a login hint, send it if (!Strings.isNullOrEmpty(loginHint)) { claims.claim("login_hint", loginHint); } JWSAlgorithm alg = clientConfig.getRequestObjectSigningAlg(); if (alg == null) { alg = signingAndValidationService.getDefaultSigningAlgorithm(); } SignedJWT jwt = new SignedJWT(new JWSHeader(alg), claims.build()); signingAndValidationService.signJwt(jwt, alg); try { URIBuilder uriBuilder = new URIBuilder(serverConfig.getAuthorizationEndpointUri()); uriBuilder.addParameter("request", jwt.serialize()); // build out the URI return uriBuilder.build().toString(); } catch (URISyntaxException e) { throw new AuthenticationServiceException("Malformed Authorization Endpoint Uri", e); } }
@Test public void buildAuthRequestUrl_withLoginHint() { String requestUri = urlBuilder.buildAuthRequestUrl(serverConfig, clientConfig, redirectUri, nonce, state, options, loginHint); // parsing the result UriComponentsBuilder builder = null; try { builder = UriComponentsBuilder.fromUri(new URI(requestUri)); } catch (URISyntaxException e1) { fail("URISyntaxException was thrown."); } UriComponents components = builder.build(); String jwtString = components.getQueryParams().get("request").get(0); JWTClaimsSet claims = null; try { SignedJWT jwt = SignedJWT.parse(jwtString); claims = jwt.getJWTClaimsSet(); } catch (ParseException e) { fail("ParseException was thrown."); } assertEquals(responseType, claims.getClaim("response_type")); assertEquals(clientConfig.getClientId(), claims.getClaim("client_id")); List<String> scopeList = Arrays.asList(((String) claims.getClaim("scope")).split(" ")); assertTrue(scopeList.containsAll(clientConfig.getScope())); assertEquals(redirectUri, claims.getClaim("redirect_uri")); assertEquals(nonce, claims.getClaim("nonce")); assertEquals(state, claims.getClaim("state")); for (String claim : options.keySet()) { assertEquals(options.get(claim), claims.getClaim(claim)); } assertEquals(loginHint, claims.getClaim("login_hint")); }
@Override protected SchemaTransform from(Configuration configuration) { return new ExplodeTransform(configuration); }
@Test @Category(NeedsRunner.class) public void testCrossProduct() { PCollection<Row> input = pipeline.apply(Create.of(INPUT_ROWS)).setRowSchema(INPUT_SCHEMA); PCollection<Row> exploded = PCollectionRowTuple.of(JavaExplodeTransformProvider.INPUT_ROWS_TAG, input) .apply( new JavaExplodeTransformProvider() .from( JavaExplodeTransformProvider.Configuration.builder() .setFields(ImmutableList.of("a", "c")) .setCrossProduct(true) .build())) .get(JavaExplodeTransformProvider.OUTPUT_ROWS_TAG); PAssert.that(exploded) .containsInAnyOrder( Row.withSchema(OUTPUT_SCHEMA).addValues(1, 1.5, "x").build(), Row.withSchema(OUTPUT_SCHEMA).addValues(2, 1.5, "x").build(), Row.withSchema(OUTPUT_SCHEMA).addValues(1, 1.5, "y").build(), Row.withSchema(OUTPUT_SCHEMA).addValues(2, 1.5, "y").build()); pipeline.run(); }
public void appendIssuesToResponse(List<IssueDto> issueDtos, Map<String, RuleDto> ruleCache, OutputStream outputStream) { try { for (IssueDto issueDto : issueDtos) { RuleDto ruleDto = ruleCache.get(issueDto.getRuleUuid()); AbstractMessageLite messageLite = protobufObjectGenerator.generateIssueMessage(issueDto, ruleDto); messageLite.writeDelimitedTo(outputStream); } outputStream.flush(); } catch (IOException e) { throw new RuntimeException(e); } }
@Test public void appendIssuesToResponse_outputStreamIsCalledAtLeastOnce() throws IOException { OutputStream outputStream = mock(OutputStream.class); IssueDto issueDto = new IssueDto(); issueDto.setFilePath("filePath"); issueDto.setKee("key"); issueDto.setStatus("OPEN"); issueDto.setRuleKey("repo", "rule"); underTest.appendIssuesToResponse(List.of(issueDto), emptyMap(), outputStream); verify(outputStream, atLeastOnce()).write(any(byte[].class), anyInt(), anyInt()); }
public void writeInt6(final long value) { // TODO }
@Test void assertWriteInt6() { assertDoesNotThrow(() -> new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).writeInt6(1L)); }
public Collection<SQLToken> generateSQLTokens(final SelectStatementContext selectStatementContext) { Collection<SQLToken> result = new LinkedHashSet<>(generateSelectSQLTokens(selectStatementContext)); selectStatementContext.getSubqueryContexts().values().stream().map(this::generateSelectSQLTokens).forEach(result::addAll); return result; }
@Test void assertGenerateSQLTokensWhenOwnerMatchTableName() { ColumnSegment column = new ColumnSegment(0, 0, new IdentifierValue("mobile")); column.setOwner(new OwnerSegment(0, 0, new IdentifierValue("doctor"))); ProjectionsSegment projections = mock(ProjectionsSegment.class); when(projections.getProjections()).thenReturn(Collections.singleton(new ColumnProjectionSegment(column))); SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS); when(sqlStatementContext.getSubqueryType()).thenReturn(null); when(sqlStatementContext.getDatabaseType()).thenReturn(databaseType); when(sqlStatementContext.getSqlStatement().getProjections()).thenReturn(projections); when(sqlStatementContext.getSubqueryContexts().values()).thenReturn(Collections.emptyList()); SimpleTableSegment doctorTable = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("doctor"))); SimpleTableSegment doctorOneTable = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("doctor1"))); when(sqlStatementContext.getTablesContext()).thenReturn(new TablesContext(Arrays.asList(doctorTable, doctorOneTable), databaseType, DefaultDatabase.LOGIC_NAME)); when(sqlStatementContext.getProjectionsContext().getProjections()).thenReturn(Collections.singleton(new ColumnProjection("doctor", "mobile", null, databaseType))); Collection<SQLToken> actual = generator.generateSQLTokens(sqlStatementContext); assertThat(actual.size(), is(1)); }
public static void main( String[] args ) { // suppress the Dock icon on OS X System.setProperty("apple.awt.UIElement", "true"); int exitCode = new CommandLine(new ExtractText()).execute(args); System.exit(exitCode); }
@Test void testPDFBoxRepeatableSubcommandAddFileName() throws Exception { PDFBox.main(new String[] { "export:text", "-i", testfile1, "-console", "-addFileName", "export:text", "-i", testfile2, "-console", "-addFileName" }); String result = out.toString("UTF-8"); assertTrue(result.contains("PDF1")); assertTrue(result.contains("PDF2")); assertTrue(result.contains("PDF file: " + filename1)); assertTrue(result.contains("Hello")); assertTrue(result.contains("World.")); assertTrue(result.contains("PDF file: " + filename2)); }
@Override public TypeDescriptor<Map<K, V>> getEncodedTypeDescriptor() { return new TypeDescriptor<Map<K, V>>() {}.where( new TypeParameter<K>() {}, keyCoder.getEncodedTypeDescriptor()) .where(new TypeParameter<V>() {}, valueCoder.getEncodedTypeDescriptor()); }
@Test public void testEncodedTypeDescriptor() throws Exception { TypeDescriptor<Map<Integer, String>> typeDescriptor = new TypeDescriptor<Map<Integer, String>>() {}; assertThat(TEST_CODER.getEncodedTypeDescriptor(), equalTo(typeDescriptor)); }
@SuppressWarnings("varargs") @SafeVarargs @Udf public final <T> T coalesce(final T first, final T... others) { if (first != null) { return first; } if (others == null) { return null; } return Arrays.stream(others) .filter(Objects::nonNull) .findFirst() .orElse(null); }
@Test public void shouldReturnNullForNullOnly() { assertThat(udf.coalesce(null), is(nullValue())); }
public String format(DataTable table) { StringBuilder result = new StringBuilder(); formatTo(table, result); return result.toString(); }
@Test void should_escape_table_delimiters() { DataTable table = DataTable.create(asList( singletonList("|"), singletonList("\\"), singletonList("\n"))); ; assertEquals("" + "| \\| |\n" + "| \\\\ |\n" + "| \\n |\n", formatter.format(table)); }
/**
 * Runs the Spark segment-generation job: resolves input files, distributes
 * plugins and dependency jars to executors, builds one segment per input file
 * in parallel, and moves the resulting tarballs to the output (via an optional
 * staging directory that must share the output's URI scheme).
 */
@Override
public void run()
    throws Exception {
  //init all file systems
  List<PinotFSSpec> pinotFSSpecs = _spec.getPinotFSSpecs();
  for (PinotFSSpec pinotFSSpec : pinotFSSpecs) {
    PinotFSFactory.register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(), new PinotConfiguration(pinotFSSpec));
  }
  //Get list of files to process; scheme-less URIs are treated as local files.
  URI inputDirURI = new URI(_spec.getInputDirURI());
  if (inputDirURI.getScheme() == null) {
    inputDirURI = new File(_spec.getInputDirURI()).toURI();
  }
  PinotFS inputDirFS = PinotFSFactory.create(inputDirURI.getScheme());
  List<String> filteredFiles = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(inputDirFS, inputDirURI,
      _spec.getIncludeFileNamePattern(), _spec.getExcludeFileNamePattern(), _spec.isSearchRecursively());
  LOGGER.info("Found {} files to create Pinot segments!", filteredFiles.size());
  //Get outputFS for writing output pinot segments
  URI outputDirURI = new URI(_spec.getOutputDirURI());
  if (outputDirURI.getScheme() == null) {
    outputDirURI = new File(_spec.getOutputDirURI()).toURI();
  }
  PinotFS outputDirFS = PinotFSFactory.create(outputDirURI.getScheme());
  outputDirFS.mkdir(outputDirURI);
  //Get staging directory for temporary output pinot segments
  String stagingDir = _spec.getExecutionFrameworkSpec().getExtraConfigs().get(STAGING_DIR);
  URI stagingDirURI = null;
  if (stagingDir != null) {
    stagingDirURI = URI.create(stagingDir);
    if (stagingDirURI.getScheme() == null) {
      stagingDirURI = new File(stagingDir).toURI();
    }
    // Staging and output must live on the same file system so the final copy works.
    if (!outputDirURI.getScheme().equals(stagingDirURI.getScheme())) {
      throw new RuntimeException(String
          .format("The scheme of staging directory URI [%s] and output directory URI [%s] has to be same.",
              stagingDirURI, outputDirURI));
    }
    outputDirFS.mkdir(stagingDirURI);
  }
  try {
    JavaSparkContext sparkContext = JavaSparkContext.fromSparkContext(SparkContext.getOrCreate());

    // Pinot plugins are necessary to launch Pinot ingestion job from every mapper.
    // In order to ensure pinot plugins would be loaded to each worker, this method
    // tars entire plugins directory and set this file into Distributed cache.
    // Then each executor job will untar the plugin tarball, and set system properties accordingly.
    packPluginsToDistributedCache(sparkContext);

    // Add dependency jars
    if (_spec.getExecutionFrameworkSpec().getExtraConfigs().containsKey(DEPS_JAR_DIR)) {
      addDepsJarToDistributedCache(sparkContext, _spec.getExecutionFrameworkSpec().getExtraConfigs().get(DEPS_JAR_DIR));
    }

    // Build "path idx" work items. Sequence ids are either per-directory
    // (sorted sibling order) or global over the whole file list.
    List<String> pathAndIdxList = new ArrayList<>();
    if (!SegmentGenerationJobUtils.useGlobalDirectorySequenceId(_spec.getSegmentNameGeneratorSpec())) {
      Map<String, List<String>> localDirIndex = new HashMap<>();
      for (String filteredFile : filteredFiles) {
        Path filteredParentPath = Paths.get(filteredFile).getParent();
        if (!localDirIndex.containsKey(filteredParentPath.toString())) {
          localDirIndex.put(filteredParentPath.toString(), new ArrayList<>());
        }
        localDirIndex.get(filteredParentPath.toString()).add(filteredFile);
      }
      for (String parentPath : localDirIndex.keySet()) {
        List<String> siblingFiles = localDirIndex.get(parentPath);
        Collections.sort(siblingFiles);
        for (int i = 0; i < siblingFiles.size(); i++) {
          pathAndIdxList.add(String.format("%s %d", siblingFiles.get(i), i));
        }
      }
    } else {
      for (int i = 0; i < filteredFiles.size(); i++) {
        pathAndIdxList.add(String.format("%s %d", filteredFiles.get(i), i));
      }
    }
    // Clamp parallelism to the number of work items.
    int numDataFiles = pathAndIdxList.size();
    int jobParallelism = _spec.getSegmentCreationJobParallelism();
    if (jobParallelism <= 0 || jobParallelism > numDataFiles) {
      jobParallelism = numDataFiles;
    }
    JavaRDD<String> pathRDD = sparkContext.parallelize(pathAndIdxList, jobParallelism);

    // Values captured by the executor closure must be (effectively) final.
    final String pluginsInclude =
        (sparkContext.getConf().contains(PLUGINS_INCLUDE_PROPERTY_NAME)) ? sparkContext.getConf()
            .get(PLUGINS_INCLUDE_PROPERTY_NAME) : null;
    final URI finalInputDirURI = inputDirURI;
    final URI finalOutputDirURI = (stagingDirURI == null) ? outputDirURI : stagingDirURI;
    // Prevent using lambda expression in Spark to avoid potential serialization exceptions, use inner function
    // instead.
    pathRDD.foreach(new VoidFunction<String>() {
      @Override
      public void call(String pathAndIdx)
          throws Exception {
        PluginManager.get().init();
        // Re-register file systems inside the executor JVM.
        for (PinotFSSpec pinotFSSpec : _spec.getPinotFSSpecs()) {
          PinotFSFactory
              .register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(), new PinotConfiguration(pinotFSSpec));
        }
        PinotFS finalOutputDirFS = PinotFSFactory.create(finalOutputDirURI.getScheme());
        // Work item format is "<path> <sequenceId>".
        String[] splits = pathAndIdx.split(" ");
        String path = splits[0];
        int idx = Integer.valueOf(splits[1]);
        // Load Pinot Plugins copied from Distributed cache.
        File localPluginsTarFile = new File(PINOT_PLUGINS_TAR_GZ);
        if (localPluginsTarFile.exists()) {
          File pluginsDirFile = new File(PINOT_PLUGINS_DIR + "-" + idx);
          try {
            TarCompressionUtils.untar(localPluginsTarFile, pluginsDirFile);
          } catch (Exception e) {
            LOGGER.error("Failed to untar local Pinot plugins tarball file [{}]", localPluginsTarFile, e);
            throw new RuntimeException(e);
          }
          LOGGER.info("Trying to set System Property: [{}={}]", PLUGINS_DIR_PROPERTY_NAME,
              pluginsDirFile.getAbsolutePath());
          System.setProperty(PLUGINS_DIR_PROPERTY_NAME, pluginsDirFile.getAbsolutePath());
          if (pluginsInclude != null) {
            LOGGER.info("Trying to set System Property: [{}={}]", PLUGINS_INCLUDE_PROPERTY_NAME, pluginsInclude);
            System.setProperty(PLUGINS_INCLUDE_PROPERTY_NAME, pluginsInclude);
          }
          LOGGER.info("Pinot plugins System Properties are set at [{}], plugins includes [{}]",
              System.getProperty(PLUGINS_DIR_PROPERTY_NAME), System.getProperty(PLUGINS_INCLUDE_PROPERTY_NAME));
        } else {
          LOGGER.warn("Cannot find local Pinot plugins tar file at [{}]", localPluginsTarFile.getAbsolutePath());
        }
        URI inputFileURI = URI.create(path);
        if (inputFileURI.getScheme() == null) {
          inputFileURI =
              new URI(finalInputDirURI.getScheme(), inputFileURI.getSchemeSpecificPart(), inputFileURI.getFragment());
        }
        //create localTempDir for input and output
        File localTempDir = new File(FileUtils.getTempDirectory(), "pinot-" + UUID.randomUUID());
        File localInputTempDir = new File(localTempDir, "input");
        FileUtils.forceMkdir(localInputTempDir);
        File localOutputTempDir = new File(localTempDir, "output");
        FileUtils.forceMkdir(localOutputTempDir);
        //copy input path to local
        File localInputDataFile = new File(localInputTempDir, getFileName(inputFileURI));
        LOGGER.info("Trying to copy input file from {} to {}", inputFileURI, localInputDataFile);
        PinotFSFactory.create(inputFileURI.getScheme()).copyToLocalFile(inputFileURI, localInputDataFile);
        //create task spec
        SegmentGenerationTaskSpec taskSpec = new SegmentGenerationTaskSpec();
        taskSpec.setInputFilePath(localInputDataFile.getAbsolutePath());
        taskSpec.setOutputDirectoryPath(localOutputTempDir.getAbsolutePath());
        taskSpec.setRecordReaderSpec(_spec.getRecordReaderSpec());
        taskSpec
            .setSchema(SegmentGenerationUtils.getSchema(_spec.getTableSpec().getSchemaURI(), _spec.getAuthToken()));
        taskSpec.setTableConfig(
            SegmentGenerationUtils.getTableConfig(_spec.getTableSpec().getTableConfigURI(), _spec.getAuthToken()));
        taskSpec.setSequenceId(idx);
        taskSpec.setSegmentNameGeneratorSpec(_spec.getSegmentNameGeneratorSpec());
        taskSpec.setFailOnEmptySegment(_spec.isFailOnEmptySegment());
        taskSpec.setCustomProperty(BatchConfigProperties.INPUT_DATA_FILE_URI_KEY, inputFileURI.toString());
        SegmentGenerationTaskRunner taskRunner = new SegmentGenerationTaskRunner(taskSpec);
        String segmentName = taskRunner.run();
        // Tar segment directory to compress file
        File localSegmentDir = new File(localOutputTempDir, segmentName);
        String segmentTarFileName = URIUtils.encode(segmentName + Constants.TAR_GZ_FILE_EXT);
        File localSegmentTarFile = new File(localOutputTempDir, segmentTarFileName);
        LOGGER.info("Tarring segment from: {} to: {}", localSegmentDir, localSegmentTarFile);
        TarCompressionUtils.createCompressedTarFile(localSegmentDir, localSegmentTarFile);
        long uncompressedSegmentSize = FileUtils.sizeOf(localSegmentDir);
        long compressedSegmentSize = FileUtils.sizeOf(localSegmentTarFile);
        LOGGER.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName,
            DataSizeUtils.fromBytes(uncompressedSegmentSize), DataSizeUtils.fromBytes(compressedSegmentSize));
        // Move segment to output PinotFS
        URI relativeOutputPath =
            SegmentGenerationUtils.getRelativeOutputPath(finalInputDirURI, inputFileURI, finalOutputDirURI);
        URI outputSegmentTarURI = relativeOutputPath.resolve(segmentTarFileName);
        SegmentGenerationJobUtils.moveLocalTarFileToRemote(localSegmentTarFile, outputSegmentTarURI,
            _spec.isOverwriteOutput());
        // Create and upload segment metadata tar file
        String metadataTarFileName = URIUtils.encode(segmentName + Constants.METADATA_TAR_GZ_FILE_EXT);
        URI outputMetadataTarURI = relativeOutputPath.resolve(metadataTarFileName);
        if (finalOutputDirFS.exists(outputMetadataTarURI) && (_spec.isOverwriteOutput() || !_spec
            .isCreateMetadataTarGz())) {
          LOGGER.info("Deleting existing metadata tar gz file: {}", outputMetadataTarURI);
          finalOutputDirFS.delete(outputMetadataTarURI, true);
        }
        if (taskSpec.isCreateMetadataTarGz()) {
          File localMetadataTarFile = new File(localOutputTempDir, metadataTarFileName);
          SegmentGenerationJobUtils.createSegmentMetadataTarGz(localSegmentDir, localMetadataTarFile);
          SegmentGenerationJobUtils.moveLocalTarFileToRemote(localMetadataTarFile, outputMetadataTarURI,
              _spec.isOverwriteOutput());
        }
        FileUtils.deleteQuietly(localSegmentDir);
        FileUtils.deleteQuietly(localInputDataFile);
      }
    });
    // Promote staged segments to the final output directory.
    if (stagingDirURI != null) {
      LOGGER.info("Trying to copy segment tars from staging directory: [{}] to output directory [{}]", stagingDirURI,
          outputDirURI);
      outputDirFS.copyDir(stagingDirURI, outputDirURI);
    }
  } finally {
    // Always clean up the staging directory, even on failure.
    if (stagingDirURI != null) {
      LOGGER.info("Trying to clean up staging directory: [{}]", stagingDirURI);
      outputDirFS.delete(stagingDirURI, true);
    }
  }
}
@Test
public void testSegmentGeneration()
    throws Exception {
  // TODO use common resource definitions & code shared with Hadoop unit test.
  // So probably need a pinot-batch-ingestion-common tests jar that we depend on.

  File testDir = Files.createTempDirectory("testSegmentGeneration-").toFile();
  testDir.delete();
  testDir.mkdirs();

  // Input: a tiny two-row CSV file.
  File inputDir = new File(testDir, "input");
  inputDir.mkdirs();
  File inputFile = new File(inputDir, "input.csv");
  FileUtils.writeLines(inputFile, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));

  // Create an output directory, with two empty files in it. One we'll overwrite,
  // and one we'll leave alone.
  final String outputFilename = "myTable_OFFLINE_0.tar.gz";
  final String existingFilename = "myTable_OFFLINE_100.tar.gz";
  File outputDir = new File(testDir, "output");
  FileUtils.touch(new File(outputDir, outputFilename));
  FileUtils.touch(new File(outputDir, existingFilename));

  // Set up schema file.
  final String schemaName = "myTable";
  File schemaFile = new File(testDir, "myTable.schema");
  Schema schema = new SchemaBuilder()
      .setSchemaName(schemaName)
      .addSingleValueDimension("col1", DataType.STRING)
      .addMetric("col2", DataType.INT)
      .build();
  FileUtils.write(schemaFile, schema.toPrettyJsonString(), StandardCharsets.UTF_8);

  // Set up table config file.
  File tableConfigFile = new File(testDir, "myTable.table");
  TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE)
      .setTableName("myTable")
      .setNumReplicas(1)
      .build();
  FileUtils.write(tableConfigFile, tableConfig.toJsonString(), StandardCharsets.UTF_8);

  // Job spec: local file system, CSV reader, no overwriting of existing output.
  SegmentGenerationJobSpec jobSpec = new SegmentGenerationJobSpec();
  jobSpec.setJobType("SegmentCreation");
  jobSpec.setInputDirURI(inputDir.toURI().toString());
  jobSpec.setOutputDirURI(outputDir.toURI().toString());
  jobSpec.setOverwriteOutput(false);

  RecordReaderSpec recordReaderSpec = new RecordReaderSpec();
  recordReaderSpec.setDataFormat("csv");
  recordReaderSpec.setClassName(CSVRecordReader.class.getName());
  recordReaderSpec.setConfigClassName(CSVRecordReaderConfig.class.getName());
  jobSpec.setRecordReaderSpec(recordReaderSpec);

  TableSpec tableSpec = new TableSpec();
  tableSpec.setTableName("myTable");
  tableSpec.setSchemaURI(schemaFile.toURI().toString());
  tableSpec.setTableConfigURI(tableConfigFile.toURI().toString());
  jobSpec.setTableSpec(tableSpec);

  ExecutionFrameworkSpec efSpec = new ExecutionFrameworkSpec();
  efSpec.setName("standalone");
  efSpec.setSegmentGenerationJobRunnerClassName(SparkSegmentGenerationJobRunner.class.getName());
  jobSpec.setExecutionFrameworkSpec(efSpec);

  PinotFSSpec pfsSpec = new PinotFSSpec();
  pfsSpec.setScheme("file");
  pfsSpec.setClassName(LocalPinotFS.class.getName());
  jobSpec.setPinotFSSpecs(Collections.singletonList(pfsSpec));

  SparkSegmentGenerationJobRunner jobRunner = new SparkSegmentGenerationJobRunner(jobSpec);
  jobRunner.run();

  // The output directory should still have the original file in it.
  File oldSegmentFile = new File(outputDir, existingFilename);
  Assert.assertTrue(oldSegmentFile.exists());

  // The output directory should have the original file in it (since we aren't overwriting)
  File newSegmentFile = new File(outputDir, outputFilename);
  Assert.assertTrue(newSegmentFile.exists());
  Assert.assertTrue(newSegmentFile.isFile());
  Assert.assertTrue(newSegmentFile.length() == 0);

  // Now run again, but this time with overwriting of output files, and confirm we got a valid segment file.
  jobSpec.setOverwriteOutput(true);
  jobRunner = new SparkSegmentGenerationJobRunner(jobSpec);
  jobRunner.run();

  // The original file should still be there.
  Assert.assertTrue(oldSegmentFile.exists());

  Assert.assertTrue(newSegmentFile.exists());
  Assert.assertTrue(newSegmentFile.isFile());
  Assert.assertTrue(newSegmentFile.length() > 0);

  // FUTURE - validate contents of file?
}
/**
 * Runs a health check on every configured storage location in parallel and
 * returns the locations that pass, preserving their configured order.
 *
 * @param conf datanode configuration; also used to obtain the local FS.
 * @param dataDirs the configured storage locations to probe.
 * @return the healthy (non-failed) storage locations.
 * @throws InterruptedException if interrupted while waiting for a check.
 * @throws IOException if the local file system cannot be obtained.
 * @throws HadoopIllegalArgumentException if the tolerated-failures setting is
 *         greater than or equal to the number of configured volumes.
 * @throws DiskErrorException if more volumes failed than tolerated, or if no
 *         healthy volume remains.
 */
public List<StorageLocation> check(
    final Configuration conf,
    final Collection<StorageLocation> dataDirs)
    throws InterruptedException, IOException {
  // LinkedHashMap keeps insertion order so the returned list matches the
  // configured volume order; the Boolean value is unused beyond membership.
  final HashMap<StorageLocation, Boolean> goodLocations =
      new LinkedHashMap<>();
  final Set<StorageLocation> failedLocations = new HashSet<>();
  final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures =
      Maps.newHashMap();
  final LocalFileSystem localFS = FileSystem.getLocal(conf);
  final CheckContext context = new CheckContext(localFS, expectedPermission);

  // Start parallel disk check operations on all StorageLocations.
  for (StorageLocation location : dataDirs) {
    // Every location starts out assumed good; failures remove it below.
    goodLocations.put(location, true);
    // The delegate may decline to schedule a check; in that case the
    // location simply keeps its "good" status.
    Optional<ListenableFuture<VolumeCheckResult>> olf =
        delegateChecker.schedule(location, context);
    if (olf.isPresent()) {
      futures.put(location, olf.get());
    }
  }

  // Reject a nonsensical configuration: tolerating as many (or more)
  // failures as there are volumes would make the check meaningless.
  if (maxVolumeFailuresTolerated >= dataDirs.size()) {
    throw new HadoopIllegalArgumentException("Invalid value configured for "
        + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
        + maxVolumeFailuresTolerated + ". Value configured is >= "
        + "to the number of configured volumes (" + dataDirs.size() + ").");
  }

  final long checkStartTimeMs = timer.monotonicNow();

  // Retrieve the results of the disk checks.
  for (Map.Entry<StorageLocation,
           ListenableFuture<VolumeCheckResult>> entry : futures.entrySet()) {

    // Determine how much time we can allow for this check to complete.
    // The cumulative wait time cannot exceed maxAllowedTimeForCheck.
    final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
    final long timeLeftMs = Math.max(0,
        maxAllowedTimeForCheckMs - waitSoFarMs);
    final StorageLocation location = entry.getKey();

    try {
      final VolumeCheckResult result =
          entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
      switch (result) {
      case HEALTHY:
        break;
      case DEGRADED:
        // A degraded volume is logged but still treated as usable.
        LOG.warn("StorageLocation {} appears to be degraded.", location);
        break;
      case FAILED:
        LOG.warn("StorageLocation {} detected as failed.", location);
        failedLocations.add(location);
        goodLocations.remove(location);
        break;
      default:
        LOG.error("Unexpected health check result {} for StorageLocation {}",
            result, location);
      }
    } catch (ExecutionException|TimeoutException e) {
      // A check that errored out or timed out counts as a failed volume.
      LOG.warn("Exception checking StorageLocation " + location,
          e.getCause());
      failedLocations.add(location);
      goodLocations.remove(location);
    }
  }

  if (maxVolumeFailuresTolerated == DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
    // Sentinel setting: any number of failures is tolerated as long as at
    // least one volume survives (all-failed still throws).
    if (dataDirs.size() == failedLocations.size()) {
      throw new DiskErrorException("Too many failed volumes - "
          + "current valid volumes: " + goodLocations.size()
          + ", volumes configured: " + dataDirs.size()
          + ", volumes failed: " + failedLocations.size()
          + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
    }
  } else {
    if (failedLocations.size() > maxVolumeFailuresTolerated) {
      throw new DiskErrorException("Too many failed volumes - "
          + "current valid volumes: " + goodLocations.size()
          + ", volumes configured: " + dataDirs.size()
          + ", volumes failed: " + failedLocations.size()
          + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
    }
  }

  if (goodLocations.size() == 0) {
    throw new DiskErrorException("All directories in "
        + DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
        + failedLocations);
  }

  return new ArrayList<>(goodLocations.keySet());
}
@Test public void testInvalidConfigurationValues() throws Exception { final List<StorageLocation> locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY); Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 4); intercept(HadoopIllegalArgumentException.class, "Invalid value configured for dfs.datanode.failed.volumes.tolerated" + " - 4. Value configured is >= to the " + "number of configured volumes (3).", () -> new StorageLocationChecker(conf, new FakeTimer()).check(conf, locations)); conf.unset(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY); conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0); intercept(HadoopIllegalArgumentException.class, "Invalid value configured for dfs.datanode.disk.check.timeout" + " - 0 (should be > 0)", () -> new StorageLocationChecker(conf, new FakeTimer())); conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY); conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2); intercept(HadoopIllegalArgumentException.class, "Invalid value configured for dfs.datanode.failed.volumes.tolerated" + " - -2 should be greater than or equal to -1", () -> new StorageLocationChecker(conf, new FakeTimer())); }
public static CommonsConfigurationRetryConfiguration of(final Configuration configuration) throws ConfigParseException { CommonsConfigurationRetryConfiguration obj = new CommonsConfigurationRetryConfiguration(); try{ obj.getConfigs().putAll(obj.getProperties(configuration.subset(RETRY_CONFIGS_PREFIX))); obj.getInstances().putAll(obj.getProperties(configuration.subset(RETRY_INSTANCES_PREFIX))); return obj; }catch (Exception ex){ throw new ConfigParseException("Error creating retry configuration", ex); } }
@Test public void testFromYamlFile() throws ConfigurationException { Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class, TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME); CommonsConfigurationRetryConfiguration retryConfiguration = CommonsConfigurationRetryConfiguration.of(config); assertConfigs(retryConfiguration.getConfigs()); assertInstances(retryConfiguration.getInstances()); }
public static VerificationMode times(final int count) { checkArgument(count >= 0, "Times count must not be less than zero"); return new TimesVerification(count); }
@Test public void should_fail_to_verify_while_once_expectation_can_not_be_met() throws Exception { httpServer(port(), hit); assertThrows(VerificationException.class, () -> hit.verify(by(uri("/foo")), times(1))); }
public Collection<IndexMetaData> revise(final String tableName, final Collection<IndexMetaData> originalMetaDataList) { Optional<? extends IndexReviser<T>> reviser = reviseEntry.getIndexReviser(rule, tableName); return reviser.isPresent() ? originalMetaDataList.stream() .map(each -> reviser.get().revise(tableName, each, rule)).filter(Optional::isPresent).map(Optional::get).collect(Collectors.toCollection(LinkedHashSet::new)) : originalMetaDataList; }
@Test void assertReviseIsPresentIsTrue() { IndexReviser<T> reviser = mock(IndexReviser.class); IndexMetaData indexMetaData = new IndexMetaData("index"); doReturn(Optional.of(reviser)).when(metaDataReviseEntry).getIndexReviser(any(), anyString()); when(reviser.revise(anyString(), any(), any())).thenReturn(Optional.of(indexMetaData)); Collection<IndexMetaData> indexMetaDataCollection = Arrays.asList(new IndexMetaData("index1"), new IndexMetaData("index2")); Collection<IndexMetaData> actual = indexReviseEngine.revise("tableName", indexMetaDataCollection); assertThat(actual.size(), equalTo(1)); assertTrue(actual.contains(indexMetaData)); }
@Override @Nullable public Object convert(String value) { if (value == null || value.isEmpty()) { return null; } final Parser parser = new Parser(timeZone.toTimeZone()); final List<DateGroup> r = parser.parse(value); if (r.isEmpty() || r.get(0).getDates().isEmpty()) { return null; } return new DateTime(r.get(0).getDates().get(0), timeZone); }
@Test public void convertUsesEtcUTCIfTimeZoneSettingIsInvalid() throws Exception { Converter c = new FlexibleDateConverter(ImmutableMap.<String, Object>of("time_zone", "TEST")); final DateTime dateOnly = (DateTime) c.convert("2014-3-12"); assertThat(dateOnly.getZone()).isEqualTo(DateTimeZone.forID("Etc/UTC")); }
@Override public ServiceModel getServiceModelSnapshot() { try (LatencyMeasurement measurement = metrics.startServiceModelSnapshotLatencyMeasurement()) { return modelGenerator.toServiceModel(duperModelManager.getApplicationInfos(), serviceStatusProvider); } }
@Test public void sanityCheck() { SlobrokMonitorManagerImpl slobrokMonitorManager = mock(SlobrokMonitorManagerImpl.class); DuperModelManager duperModelManager = mock(DuperModelManager.class); ModelGenerator modelGenerator = mock(ModelGenerator.class); ServiceMonitorImpl serviceMonitor = new ServiceMonitorImpl( slobrokMonitorManager, mock(ServiceMonitorMetrics.class), duperModelManager, modelGenerator); ApplicationInfo application1 = mock(ApplicationInfo.class); ApplicationInfo application2 = mock(ApplicationInfo.class); List<ApplicationInfo> applications = Stream.of(application1, application2) .toList(); when(duperModelManager.getApplicationInfos()).thenReturn(applications); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); verify(duperModelManager, times(1)).getApplicationInfos(); verify(modelGenerator).toServiceModel(applications, slobrokMonitorManager); }
private RemotingCommand getAllMessageRequestMode(ChannelHandlerContext ctx, RemotingCommand request) { final RemotingCommand response = RemotingCommand.createResponseCommand(null); String content = this.brokerController.getQueryAssignmentProcessor().getMessageRequestModeManager().encode(); if (content != null && !content.isEmpty()) { try { response.setBody(content.getBytes(MixAll.DEFAULT_CHARSET)); } catch (UnsupportedEncodingException e) { LOGGER.error("get all message request mode from master error.", e); response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("UnsupportedEncodingException " + e); return response; } } else { LOGGER.error("No message request mode in this broker, client: {} ", ctx.channel().remoteAddress()); response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("No message request mode in this broker"); return response; } response.setCode(ResponseCode.SUCCESS); response.setRemark(null); return response; }
@Test public void testGetAllMessageRequestMode() throws RemotingCommandException { RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_MESSAGE_REQUEST_MODE, null); RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); }
public static CustomWeighting.Parameters createWeightingParameters(CustomModel customModel, EncodedValueLookup lookup) { String key = customModel.toString(); Class<?> clazz = customModel.isInternal() ? INTERNAL_CACHE.get(key) : null; if (CACHE_SIZE > 0 && clazz == null) clazz = CACHE.get(key); if (clazz == null) { clazz = createClazz(customModel, lookup); if (customModel.isInternal()) { INTERNAL_CACHE.put(key, clazz); if (INTERNAL_CACHE.size() > 100) { CACHE.putAll(INTERNAL_CACHE); INTERNAL_CACHE.clear(); LoggerFactory.getLogger(CustomModelParser.class).warn("Internal cache must stay small but was " + INTERNAL_CACHE.size() + ". Cleared it. Misuse of CustomModel::internal?"); } } else if (CACHE_SIZE > 0) { CACHE.put(key, clazz); } } try { // The class does not need to be thread-safe as we create an instance per request CustomWeightingHelper prio = (CustomWeightingHelper) clazz.getDeclaredConstructor().newInstance(); prio.init(customModel, lookup, CustomModel.getAreasAsMap(customModel.getAreas())); return new CustomWeighting.Parameters( prio::getSpeed, prio::calcMaxSpeed, prio::getPriority, prio::calcMaxPriority, customModel.getDistanceInfluence() == null ? 0 : customModel.getDistanceInfluence(), customModel.getHeadingPenalty() == null ? Parameters.Routing.DEFAULT_HEADING_PENALTY : customModel.getHeadingPenalty()); } catch (ReflectiveOperationException ex) { throw new IllegalArgumentException("Cannot compile expression " + ex.getMessage(), ex); } }
@Test public void testSpeedFactorAndPriorityAndMaxSpeed() { EdgeIteratorState primary = graph.edge(0, 1).setDistance(10). set(roadClassEnc, PRIMARY).set(avgSpeedEnc, 80).set(accessEnc, true, true); EdgeIteratorState secondary = graph.edge(1, 2).setDistance(10). set(roadClassEnc, SECONDARY).set(avgSpeedEnc, 70).set(accessEnc, true, true); CustomModel customModel = new CustomModel(); customModel.addToPriority(If("road_class == PRIMARY", MULTIPLY, "0.9")); customModel.addToSpeed(If("true", LIMIT, avgSpeedEnc.getName())); customModel.addToSpeed(If("road_class == PRIMARY", MULTIPLY, "0.8")); CustomWeighting.Parameters parameters = CustomModelParser.createWeightingParameters(customModel, encodingManager); assertEquals(0.9, parameters.getEdgeToPriorityMapping().get(primary, false), 0.01); assertEquals(64, parameters.getEdgeToSpeedMapping().get(primary, false), 0.01); assertEquals(1, parameters.getEdgeToPriorityMapping().get(secondary, false), 0.01); assertEquals(70, parameters.getEdgeToSpeedMapping().get(secondary, false), 0.01); customModel.addToSpeed(If("road_class != PRIMARY", LIMIT, "50")); CustomWeighting.EdgeToDoubleMapping speedMapping = CustomModelParser.createWeightingParameters(customModel, encodingManager).getEdgeToSpeedMapping(); assertEquals(64, speedMapping.get(primary, false), 0.01); assertEquals(50, speedMapping.get(secondary, false), 0.01); }
public static void getSemanticPropsDualFromString( DualInputSemanticProperties result, String[] forwardedFirst, String[] forwardedSecond, String[] nonForwardedFirst, String[] nonForwardedSecond, String[] readFieldsFirst, String[] readFieldsSecond, TypeInformation<?> inType1, TypeInformation<?> inType2, TypeInformation<?> outType) { getSemanticPropsDualFromString( result, forwardedFirst, forwardedSecond, nonForwardedFirst, nonForwardedSecond, readFieldsFirst, readFieldsSecond, inType1, inType2, outType, false); }
@Test void testForwardedReadDual() { String[] forwardedFieldsFirst = {"f1->f2; f2->f3"}; String[] forwardedFieldsSecond = {"f1->f1; f2->f0"}; String[] readFieldsFirst = {"0;2"}; String[] readFieldsSecond = {"1"}; DualInputSemanticProperties dsp = new DualInputSemanticProperties(); SemanticPropUtil.getSemanticPropsDualFromString( dsp, forwardedFieldsFirst, forwardedFieldsSecond, null, null, readFieldsFirst, readFieldsSecond, fourIntTupleType, fourIntTupleType, fourIntTupleType); assertThat(dsp.getForwardingTargetFields(0, 1)).contains(2); assertThat(dsp.getForwardingTargetFields(0, 2)).contains(3); assertThat(dsp.getForwardingTargetFields(1, 1)).contains(1); assertThat(dsp.getForwardingTargetFields(1, 2)).contains(0); assertThat(dsp.getForwardingTargetFields(0, 0)).isEmpty(); assertThat(dsp.getForwardingTargetFields(1, 3)).isEmpty(); assertThat(dsp.getReadFields(0)).containsExactly(0, 2); assertThat(dsp.getReadFields(1)).containsExactly(1); }
@Override public void increment(final String key) { this.metrics.increment(this.threadContext, new IRubyObject[] { this.getSymbol(key) }); }
@Test public void testIncrement() { final NamespacedMetric metrics = this.getInstance().namespace("test"); metrics.increment("abc"); { final RubyHash metricStore = getMetricStore(new String[]{"test"}); assertThat(this.getMetricLongValue(metricStore, "abc")).isEqualTo(1); } metrics.increment("abc"); { final RubyHash metricStore = getMetricStore(new String[]{"test"}); assertThat(this.getMetricLongValue(metricStore, "abc")).isEqualTo(2); } }
@Override public int hashCode() { return Objects.hash(value, precision, sessionTimeZoneKey); }
@Test public void testEqualsHashcodeMicroseconds() { SqlTimestamp t1Micros = new SqlTimestamp(0, MICROSECONDS); SqlTimestamp t2Micros = new SqlTimestamp(0, MICROSECONDS); assertEquals(t1Micros, t2Micros); assertEquals(t1Micros.hashCode(), t2Micros.hashCode()); SqlTimestamp t3Micros = new SqlTimestamp(1, MICROSECONDS); assertNotEquals(t1Micros, t3Micros); SqlTimestamp t1Millis = new SqlTimestamp(0, MILLISECONDS); assertNotEquals(t1Micros, t1Millis); }
public static JobResult createFrom(AccessExecutionGraph accessExecutionGraph) { final JobID jobId = accessExecutionGraph.getJobID(); final JobStatus jobStatus = accessExecutionGraph.getState(); checkArgument( jobStatus.isTerminalState(), "The job " + accessExecutionGraph.getJobName() + '(' + jobId + ") is not in a " + "terminal state. It is in state " + jobStatus + '.'); final JobResult.Builder builder = new JobResult.Builder(); builder.jobId(jobId); builder.applicationStatus(ApplicationStatus.fromJobStatus(accessExecutionGraph.getState())); final long netRuntime = accessExecutionGraph.getStatusTimestamp(jobStatus) - accessExecutionGraph.getStatusTimestamp(JobStatus.INITIALIZING); // guard against clock changes final long guardedNetRuntime = Math.max(netRuntime, 0L); builder.netRuntime(guardedNetRuntime); builder.accumulatorResults(accessExecutionGraph.getAccumulatorsSerialized()); if (jobStatus == JobStatus.FAILED) { final ErrorInfo errorInfo = accessExecutionGraph.getFailureInfo(); checkNotNull(errorInfo, "No root cause is found for the job failure."); builder.serializedThrowable(errorInfo.getException()); } return builder.build(); }
@Test void testFailureResultRequiresFailureCause() { assertThatThrownBy( () -> JobResult.createFrom( new ArchivedExecutionGraphBuilder() .setJobID(new JobID()) .setState(JobStatus.FAILED) .build())) .isInstanceOf(NullPointerException.class); }
@Override public String getName() { return FUNCTION_NAME; }
/**
 * Verifies the add() transform in three setups: flat addition over mixed
 * numeric/string columns (all coerced to double), nested add() expressions,
 * and BigDecimal arithmetic when a BIG_DECIMAL column participates.
 */
@Test
public void testAdditionTransformFunction() {
    // Flat add over int/long/float/double/string columns — all coerced to double.
    ExpressionContext expression = RequestContextUtils.getExpression(
        String.format("add(%s,%s,%s,%s,%s)", INT_SV_COLUMN, LONG_SV_COLUMN, FLOAT_SV_COLUMN, DOUBLE_SV_COLUMN,
            STRING_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof AdditionTransformFunction);
    Assert.assertEquals(transformFunction.getName(), AdditionTransformFunction.FUNCTION_NAME);
    double[] expectedValues = new double[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
      expectedValues[i] = (double) _intSVValues[i] + (double) _longSVValues[i] + (double) _floatSVValues[i]
          + _doubleSVValues[i] + Double.parseDouble(_stringSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedValues);

    // Nested add() expressions with literal operands.
    expression = RequestContextUtils.getExpression(
        String.format("add(add(12,%s),%s,add(add(%s,%s),0.34,%s),%s)", STRING_SV_COLUMN, DOUBLE_SV_COLUMN,
            FLOAT_SV_COLUMN, LONG_SV_COLUMN, INT_SV_COLUMN, DOUBLE_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof AdditionTransformFunction);
    for (int i = 0; i < NUM_ROWS; i++) {
      expectedValues[i] = ((12d + Double.parseDouble(_stringSVValues[i])) + _doubleSVValues[i] + (
          ((double) _floatSVValues[i] + (double) _longSVValues[i]) + 0.34 + (double) _intSVValues[i])
          + _doubleSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedValues);

    // A BIG_DECIMAL operand switches the result type to BigDecimal.
    expression = RequestContextUtils.getExpression(String.format("add(%s,%s)", DOUBLE_SV_COLUMN, BIG_DECIMAL_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof AdditionTransformFunction);
    BigDecimal[] expectedBigDecimalValues = new BigDecimal[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
      expectedBigDecimalValues[i] = BigDecimal.valueOf(_doubleSVValues[i]).add(_bigDecimalSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedBigDecimalValues);

    // Nested add() with a decimal literal cast — intermediate double results are
    // promoted to BigDecimal only where a BigDecimal operand is involved.
    expression = RequestContextUtils.getExpression(
        String.format("add(add(12,%s),%s,add(add(%s,%s),cast('12110.34556677889901122335678' as decimal),%s),%s)",
            STRING_SV_COLUMN, DOUBLE_SV_COLUMN, FLOAT_SV_COLUMN, LONG_SV_COLUMN, INT_SV_COLUMN,
            BIG_DECIMAL_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof AdditionTransformFunction);
    BigDecimal val4 = new BigDecimal("12110.34556677889901122335678");
    expectedBigDecimalValues = new BigDecimal[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
      double val1 = 12d + Double.parseDouble(_stringSVValues[i]);
      double val2 = _doubleSVValues[i];
      double val3 = (double) _floatSVValues[i] + (double) _longSVValues[i];
      BigDecimal val6 = BigDecimal.valueOf(val3).add(val4).add(BigDecimal.valueOf(_intSVValues[i]));
      expectedBigDecimalValues[i] =
          BigDecimal.valueOf(val1).add(BigDecimal.valueOf(val2)).add(val6).add(_bigDecimalSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedBigDecimalValues);
}
@PATCH @Path("/{connector}/offsets") @Operation(summary = "Alter the offsets for the specified connector") public Response alterConnectorOffsets(final @Parameter(hidden = true) @QueryParam("forward") Boolean forward, final @Context HttpHeaders headers, final @PathParam("connector") String connector, final ConnectorOffsets offsets) throws Throwable { if (offsets.offsets() == null || offsets.offsets().isEmpty()) { throw new BadRequestException("Partitions / offsets need to be provided for an alter offsets request"); } FutureCallback<Message> cb = new FutureCallback<>(); herder.alterConnectorOffsets(connector, offsets.toMap(), cb); Message msg = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/offsets", "PATCH", headers, offsets, new TypeReference<Message>() { }, new IdentityTranslator<>(), forward); return Response.ok().entity(msg).build(); }
@Test public void testAlterOffsetsConnectorNotFound() { Map<String, ?> partition = new HashMap<>(); Map<String, ?> offset = new HashMap<>(); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); final ArgumentCaptor<Callback<Message>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new NotFoundException("Connector not found")) .when(herder).alterConnectorOffsets(eq(CONNECTOR_NAME), eq(body.toMap()), cb.capture()); assertThrows(NotFoundException.class, () -> connectorsResource.alterConnectorOffsets(null, NULL_HEADERS, CONNECTOR_NAME, body)); }
@VisibleForTesting Manifest createManifest(Class mainClass, String defaultJobName) { Manifest manifest = new Manifest(); manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0"); boolean classHasMainMethod = false; try { Class returnType = mainClass.getMethod("main", String[].class).getReturnType(); if (returnType == Void.TYPE) { classHasMainMethod = true; } else { LOG.warn( "No Main-Class will be set in jar because main method in {} returns {}, expected void", mainClass, returnType); } } catch (NoSuchMethodException e) { LOG.warn("No Main-Class will be set in jar because {} lacks a main method.", mainClass); } if (classHasMainMethod) { manifest.getMainAttributes().put(Name.MAIN_CLASS, mainClass.getName()); } return manifest; }
@Test public void testCreateManifest_withInvalidMainMethod() { Manifest manifest = jarCreator.createManifest(EvilPipelineRunner.class, "job"); assertNull(manifest.getMainAttributes().getValue(Name.MAIN_CLASS)); }
@Override public Mono<GetPreKeysResponse> getPreKeys(final GetPreKeysRequest request) { final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice(); final ServiceIdentifier targetIdentifier = ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getTargetIdentifier()); final byte deviceId = DeviceIdUtil.validate(request.getDeviceId()); final String rateLimitKey = authenticatedDevice.accountIdentifier() + "." + authenticatedDevice.deviceId() + "__" + targetIdentifier.uuid() + "." + deviceId; return rateLimiters.getPreKeysLimiter().validateReactive(rateLimitKey) .then(Mono.fromFuture(() -> accountsManager.getByServiceIdentifierAsync(targetIdentifier)) .flatMap(Mono::justOrEmpty)) .switchIfEmpty(Mono.error(Status.NOT_FOUND.asException())) .flatMap(targetAccount -> KeysGrpcHelper.getPreKeys(targetAccount, targetIdentifier.identityType(), deviceId, keysManager)); }
@Test void getPreKeysAccountNotFound() { when(accountsManager.getByServiceIdentifierAsync(any())) .thenReturn(CompletableFuture.completedFuture(Optional.empty())); assertStatusException(Status.NOT_FOUND, () -> authenticatedServiceStub().getPreKeys(GetPreKeysRequest.newBuilder() .setTargetIdentifier(ServiceIdentifier.newBuilder() .setIdentityType(org.signal.chat.common.IdentityType.IDENTITY_TYPE_ACI) .setUuid(UUIDUtil.toByteString(UUID.randomUUID())) .build()) .build())); }
@Override @PublicAPI(usage = ACCESS) public String getName() { return WILDCARD_TYPE_NAME + boundsToString(); }
@Test public void wildcard_name_unbounded() { @SuppressWarnings("unused") class ClassWithUnboundTypeParameter<T extends List<?>> { } JavaWildcardType type = importWildcardTypeOf(ClassWithUnboundTypeParameter.class); assertThat(type.getName()).isEqualTo("?"); }
@SuppressFBWarnings(justification = "try with resources will clean up the input stream", value = {"OBL_UNSATISFIED_OBLIGATION"}) public void parseHints(File file) throws HintParseException { try (InputStream fis = new FileInputStream(file)) { parseHints(fis); } catch (SAXException | IOException ex) { LOGGER.debug("", ex); throw new HintParseException(ex); } }
@Test public void testParseHintsXSDSelection() throws Exception { File file = BaseTest.getResourceAsFile(this, "hints_invalid.xml"); HintParser instance = new HintParser(); Exception exception = Assert.assertThrows(org.owasp.dependencycheck.xml.hints.HintParseException.class, () -> instance.parseHints(file)); Assert.assertTrue(exception.getMessage().contains("Line=7, Column=133: cvc-enumeration-valid: Value 'version' is not facet-valid with respect to enumeration '[vendor, product]'. It must be a value from the enumeration.")); }
/**
 * Path filter deciding whether a file should be visible to read-optimized
 * queries. For files inside a Hudi table only the latest base files are
 * accepted; files under non-Hudi directories are always accepted; anything
 * under the .hoodie metadata folder is rejected. Results are cached per
 * parent folder (hoodiePathCache / nonHoodiePathCache).
 *
 * @param path the candidate path; assumed to be a file, not a directory.
 * @return true if the path should be visible to the query.
 * @throws HoodieException wrapping any unexpected failure during the check.
 */
@Override
public boolean accept(Path path) {
  // Lazily initialize the engine context on first use.
  if (engineContext == null) {
    this.engineContext = new HoodieLocalEngineContext(this.conf);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Checking acceptance for path " + path);
  }
  Path folder = null;
  try {
    if (storage == null) {
      storage = new HoodieHadoopStorage(convertToStoragePath(path), conf);
    }
    // Assumes path is a file
    folder = path.getParent(); // get the immediate parent.
    // Try to use the caches.
    if (nonHoodiePathCache.contains(folder.toString())) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Accepting non-hoodie path from cache: " + path);
      }
      return true;
    }

    if (hoodiePathCache.containsKey(folder.toString())) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("%s Hoodie path checked against cache, accept => %s \n", path,
            hoodiePathCache.get(folder.toString()).contains(path)));
      }
      return hoodiePathCache.get(folder.toString()).contains(path);
    }

    // Skip all files that are descendants of .hoodie in its path.
    String filePath = path.toString();
    if (filePath.contains("/" + HoodieTableMetaClient.METAFOLDER_NAME + "/")
        || filePath.endsWith("/" + HoodieTableMetaClient.METAFOLDER_NAME)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("Skipping Hoodie Metadata file  %s \n", filePath));
      }
      return false;
    }

    // Perform actual checking.
    Path baseDir;
    StoragePath storagePath = convertToStoragePath(folder);
    if (HoodiePartitionMetadata.hasPartitionMetadata(storage, storagePath)) {
      // Partition metadata tells us how deep the partition is, from which the
      // table base dir can be derived exactly.
      HoodiePartitionMetadata metadata = new HoodiePartitionMetadata(storage, storagePath);
      metadata.readFromFS();
      baseDir = HoodieHiveUtils.getNthParent(folder, metadata.getPartitionDepth());
    } else {
      // No partition metadata: guess the base dir as grandparent of the folder.
      baseDir = safeGetParentsParent(folder);
    }

    if (baseDir != null) {
      // Check whether baseDir in nonHoodiePathCache
      if (nonHoodiePathCache.contains(baseDir.toString())) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Accepting non-hoodie path from cache: " + path);
        }
        return true;
      }
      HoodieTableFileSystemView fsView = null;
      try {
        HoodieTableMetaClient metaClient = metaClientCache.get(baseDir.toString());
        if (null == metaClient) {
          metaClient = HoodieTableMetaClient.builder()
              .setConf(storage.getConf().newInstance()).setBasePath(baseDir.toString())
              .setLoadActiveTimelineOnLoad(true).build();
          metaClientCache.put(baseDir.toString(), metaClient);
        }

        final Configuration conf = getConf();
        final String timestampAsOf = conf.get(TIMESTAMP_AS_OF.key());
        if (nonEmpty(timestampAsOf)) {
          validateTimestampAsOf(metaClient, timestampAsOf);

          // Build FileSystemViewManager with specified time, it's necessary to set this config when you may
          // access old version files. For example, in spark side, using "hoodie.datasource.read.paths"
          // which contains old version files, if not specify this value, these files will be filtered.
          fsView = FileSystemViewManager.createInMemoryFileSystemViewWithTimeline(engineContext,
              metaClient, HoodieInputFormatUtils.buildMetadataConfig(conf),
              metaClient.getActiveTimeline().filterCompletedInstants().findInstantsBeforeOrEquals(timestampAsOf));
        } else {
          fsView = FileSystemViewManager.createInMemoryFileSystemView(engineContext,
              metaClient, HoodieInputFormatUtils.buildMetadataConfig(conf));
        }
        String partition = HadoopFSUtils.getRelativePartitionPath(new Path(metaClient.getBasePath().toString()), folder);
        List<HoodieBaseFile> latestFiles = fsView.getLatestBaseFiles(partition).collect(Collectors.toList());
        // populate the cache
        if (!hoodiePathCache.containsKey(folder.toString())) {
          hoodiePathCache.put(folder.toString(), new HashSet<>());
        }
        LOG.info("Based on hoodie metadata from base path: " + baseDir.toString() + ", caching " + latestFiles.size()
            + " files under " + folder);
        for (HoodieBaseFile lfile : latestFiles) {
          hoodiePathCache.get(folder.toString()).add(new Path(lfile.getPath()));
        }

        // accept the path, if its among the latest files.
        if (LOG.isDebugEnabled()) {
          LOG.debug(String.format("%s checked after cache population, accept => %s \n", path,
              hoodiePathCache.get(folder.toString()).contains(path)));
        }
        return hoodiePathCache.get(folder.toString()).contains(path);
      } catch (TableNotFoundException e) {
        // Non-hoodie path, accept it.
        if (LOG.isDebugEnabled()) {
          LOG.debug(String.format("(1) Caching non-hoodie path under %s with basePath %s \n",
              folder.toString(), baseDir.toString()));
        }
        nonHoodiePathCache.add(folder.toString());
        nonHoodiePathCache.add(baseDir.toString());
        return true;
      } finally {
        if (fsView != null) {
          fsView.close();
        }
      }
    } else {
      // files is at < 3 level depth in FS tree, can't be hoodie dataset
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("(2) Caching non-hoodie path under %s \n", folder.toString()));
      }
      nonHoodiePathCache.add(folder.toString());
      return true;
    }
  } catch (Exception e) {
    String msg = "Error checking path :" + path + ", under folder: " + folder;
    LOG.error(msg, e);
    throw new HoodieException(msg, e);
  }
}
@Test public void testPartitionPathsAsNonHoodiePaths() throws Exception { final String p1 = "2017/01/01"; final String p2 = "2017/01/02"; testTable.addCommit("001").getFileIdsWithBaseFilesInPartitions(p1, p2); Path partitionPath1 = testTable.getPartitionPath(p1).getParent(); Path partitionPath2 = testTable.getPartitionPath(p2).getParent(); assertTrue(pathFilter.accept(partitionPath1), "Directories should be accepted"); assertTrue(pathFilter.accept(partitionPath2), "Directories should be accepted"); assertEquals(2, pathFilter.nonHoodiePathCache.size(), "NonHoodiePathCache size should be 2"); }
@NotNull public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) { // 优先从 DB 中获取,因为 code 有且可以使用一次。 // 在社交登录时,当未绑定 User 时,需要绑定登录,此时需要 code 使用两次 SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state); if (socialUser != null) { return socialUser; } // 请求获取 AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state); Assert.notNull(authUser, "三方用户不能为空"); // 保存到 DB 中 socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid()); if (socialUser == null) { socialUser = new SocialUserDO(); } socialUser.setType(socialType).setCode(code).setState(state) // 需要保存 code + state 字段,保证后续可查询 .setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo((toJsonString(authUser.getToken()))) .setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo())); if (socialUser.getId() == null) { socialUserMapper.insert(socialUser); } else { socialUserMapper.updateById(socialUser); } return socialUser; }
@Test public void testAuthSocialUser_update() { // 准备参数 Integer socialType = SocialTypeEnum.GITEE.getType(); Integer userType = randomEle(SocialTypeEnum.values()).getType(); String code = "tudou"; String state = "yuanma"; // mock 数据 socialUserMapper.insert(randomPojo(SocialUserDO.class).setType(socialType).setOpenid("test_openid")); // mock 方法 AuthUser authUser = randomPojo(AuthUser.class); when(socialClientService.getAuthUser(eq(socialType), eq(userType), eq(code), eq(state))).thenReturn(authUser); // 调用 SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state); // 断言 assertBindSocialUser(socialType, result, authUser); assertEquals(code, result.getCode()); assertEquals(state, result.getState()); }
@Override public void load(RoutingTable table) { synchronized (monitor) { try { table = table.routingMethod(RoutingMethod.sharedLayer4); // This router only supports layer 4 endpoints testConfig(table); loadConfig(table.asMap().size()); gcConfig(); } catch (IOException e) { throw new UncheckedIOException(e); } } }
// End-to-end test of Nginx config loading: initial load, no-op reload, retry
// exhaustion on verification failure, eventual success, and rotation/GC of old
// config files as the clock advances. Sequencing is significant throughout.
@Test
public void load_routing_table() {
    NginxTester tester = new NginxTester();
    tester.clock.setInstant(Instant.parse("2022-01-01T15:00:00Z"));

    // Load routing table: first load verifies once, writes config, bumps reload metrics.
    RoutingTable table0 = TestUtil.readRoutingTable("lbservices-config");
    tester.load(table0)
          .assertVerifiedConfig(1)
          .assertLoadedConfig(true)
          .assertConfigContents("nginx.conf")
          .assertTemporaryConfigRemoved(true)
          .assertMetric(Nginx.CONFIG_RELOADS_METRIC, 1)
          .assertMetric(Nginx.OK_CONFIG_RELOADS_METRIC, 1)
          .assertMetric(Nginx.GENERATED_UPSTREAMS_METRIC, 5);

    // Loading the same table again does nothing: no new load, metrics unchanged.
    tester.load(table0)
          .assertVerifiedConfig(1)
          .assertLoadedConfig(false)
          .assertConfigContents("nginx.conf")
          .assertTemporaryConfigRemoved(true)
          .assertMetric(Nginx.CONFIG_RELOADS_METRIC, 1)
          .assertMetric(Nginx.OK_CONFIG_RELOADS_METRIC, 1)
          .assertMetric(Nginx.GENERATED_UPSTREAMS_METRIC, 5);

    // A new table is loaded: table0 plus one extra endpoint -> target mapping.
    Map<RoutingTable.Endpoint, RoutingTable.Target> newEntries = new HashMap<>(table0.asMap());
    newEntries.put(new RoutingTable.Endpoint("endpoint1", RoutingMethod.sharedLayer4),
                   RoutingTable.Target.create(ApplicationId.from("t1", "a1", "i1"),
                                              ClusterSpec.Id.from("default"),
                                              ZoneId.from("prod", "us-north-1"),
                                              List.of(new RoutingTable.Real("host42", 4443, 1, true))));
    RoutingTable table1 = new RoutingTable(newEntries, 43);

    // Verification of new table fails enough times to exhaust retries:
    // load throws, temp config is kept for inspection, metrics do not advance.
    tester.processExecuter.withFailCount(10);
    try {
        tester.load(table1);
        fail("Expected exception");
    } catch (Exception ignored) {}
    tester.assertVerifiedConfig(5)
          .assertLoadedConfig(false)
          .assertConfigContents("nginx.conf")
          .assertTemporaryConfigRemoved(false)
          .assertMetric(Nginx.CONFIG_RELOADS_METRIC, 1)
          .assertMetric(Nginx.OK_CONFIG_RELOADS_METRIC, 1);

    // Verification succeeds, with few enough failures: 3 attempts, then the new
    // config lands; the previous config is rotated with a timestamped suffix.
    tester.processExecuter.withFailCount(3);
    tester.load(table1)
          .assertVerifiedConfig(3)
          .assertLoadedConfig(true)
          .assertConfigContents("nginx-updated.conf")
          .assertTemporaryConfigRemoved(true)
          .assertProducedDiff()
          .assertRotatedFiles("nginxl4.conf-2022-01-01-15:00:00.000")
          .assertMetric(Nginx.CONFIG_RELOADS_METRIC, 2)
          .assertMetric(Nginx.OK_CONFIG_RELOADS_METRIC, 2);

    // Some time passes and new tables are loaded. Old rotated files are removed
    // once they age out of the retention window.
    tester.clock.advance(Duration.ofDays(3));
    tester.load(table0);
    tester.clock.advance(Duration.ofDays(4).plusSeconds(1));
    tester.load(table1)
          .assertProducedDiff()
          .assertRotatedFiles("nginxl4.conf-2022-01-04-15:00:00.000",
                              "nginxl4.conf-2022-01-08-15:00:01.000");
    tester.clock.advance(Duration.ofDays(4));
    tester.load(table1) // Same table is loaded again, which is a no-op, but old rotated files are still removed
          .assertRotatedFiles("nginxl4.conf-2022-01-08-15:00:01.000");
}