focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Decodes MySQL binlog events from the inbound buffer, emitting decoded
 * events to {@code out}. Returns early (leaving remaining bytes buffered)
 * when a complete event is not yet available.
 */
@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out) {
    // Need at least the leading status byte plus a full event header before attempting a decode.
    while (in.readableBytes() >= 1 + MySQLBinlogEventHeader.MYSQL_BINLOG_EVENT_HEADER_LENGTH) {
        in.markReaderIndex();
        MySQLPacketPayload payload = new MySQLPacketPayload(in, ctx.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get());
        checkPayload(payload);
        MySQLBinlogEventHeader binlogEventHeader = new MySQLBinlogEventHeader(payload, binlogContext.getChecksumLength());
        // Presumably resets the reader index and waits for more bytes when the
        // full event body is not buffered yet — TODO confirm in checkEventIntegrity.
        if (!checkEventIntegrity(in, binlogEventHeader)) {
            return;
        }
        Optional<MySQLBaseBinlogEvent> binlogEvent = decodeEvent(binlogEventHeader, payload);
        if (!binlogEvent.isPresent()) {
            skipChecksum(binlogEventHeader.getEventType(), in);
            return;
        }
        // Placeholder events bypass transaction handling and are forwarded as-is.
        if (binlogEvent.get() instanceof PlaceholderBinlogEvent) {
            out.add(binlogEvent.get());
            skipChecksum(binlogEventHeader.getEventType(), in);
            return;
        }
        if (decodeWithTX) {
            processEventWithTX(binlogEvent.get(), out);
        } else {
            processEventIgnoreTX(binlogEvent.get(), out);
        }
        skipChecksum(binlogEventHeader.getEventType(), in);
    }
}
@Test
void assertDecodeWithPacketError() {
    // Writes a sequence byte followed by 255 (0xFF) — presumably a MySQL error
    // packet marker — and expects the decoder to surface a RuntimeException.
    ByteBuf byteBuf = Unpooled.buffer();
    byteBuf.writeByte(1);
    byteBuf.writeByte(255);
    byteBuf.writeBytes(new byte[20]);
    assertThrows(RuntimeException.class, () -> binlogEventPacketDecoder.decode(channelHandlerContext, byteBuf, null));
}
@Override
public void close() {
    // Delegate to the timed close with an effectively unbounded timeout.
    close(Duration.ofMillis(Long.MAX_VALUE));
}
@Test
public void testProducerJmxPrefix() throws Exception {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    props.put("client.id", "client-1");
    KafkaProducer<String, String> producer = new KafkaProducer<>(
        props, new StringSerializer(), new StringSerializer());
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Registering a metric must expose an MBean under the "kafka.producer" JMX domain.
    MetricName testMetricName = producer.metrics.metricName("test-metric", "grp1", "test metric");
    producer.metrics.addMetric(testMetricName, new Avg());
    assertNotNull(server.getObjectInstance(new ObjectName("kafka.producer:type=grp1,client-id=client-1")));
    producer.close();
}
/**
 * Renders the alert e-mail body, preferring the "body" template from the
 * plugin configuration and falling back to the default template.
 */
@VisibleForTesting
String buildBody(Stream stream, AlertCondition.CheckResult checkResult, List<Message> backlog) {
    String template = bodyTemplate;
    if (pluginConfig != null && pluginConfig.getString("body") != null) {
        template = pluginConfig.getString("body");
    }
    Map<String, Object> model = getModel(stream, checkResult, backlog);
    return this.templateEngine.transform(template, model);
}
@Test
public void buildBodyContainsInfoMessageIfWebInterfaceURLIsNotSet() throws Exception {
    // A configuration without a web interface URI must trigger the fallback hint in the body.
    final EmailConfiguration configuration = new EmailConfiguration() {
        @Override
        public URI getWebInterfaceUri() {
            return null;
        }
    };
    this.emailAlertSender = new FormattedEmailAlertSender(configuration, mockNotificationService, nodeId, templateEngine, emailFactory);
    Stream stream = mock(Stream.class);
    when(stream.getId()).thenReturn("123456");
    when(stream.getTitle()).thenReturn("Stream Title");
    AlertCondition alertCondition = mock(AlertCondition.class);
    AlertCondition.CheckResult checkResult = mock(AbstractAlertCondition.CheckResult.class);
    when(checkResult.getTriggeredAt()).thenReturn(new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC));
    when(checkResult.getTriggeredCondition()).thenReturn(alertCondition);
    String body = emailAlertSender.buildBody(stream, checkResult, Collections.<Message>emptyList());
    assertThat(body).contains("Stream URL: Please configure 'transport_email_web_interface_url' in your Graylog configuration file.");
}
/**
 * Parses a JSON bookmark export, adding a host entry for every element that
 * has a non-blank URL, a user and a resolvable protocol type.
 *
 * @param protocols Registry used to resolve protocol types and schemes
 * @param file Bookmark file to read
 * @throws AccessDeniedException When the file cannot be read or the JSON is malformed
 */
@Override
protected void parse(final ProtocolFactory protocols, final Local file) throws AccessDeniedException {
    // Fix: try-with-resources closes the reader (and its underlying stream) on
    // every code path; previously it leaked on both success and failure.
    try (final JsonReader reader = new JsonReader(new InputStreamReader(file.getInputStream(), StandardCharsets.UTF_8))) {
        reader.beginArray();
        String url;
        String user;
        boolean ssl;
        Protocol protocol;
        while(reader.hasNext()) {
            reader.beginObject();
            boolean skip = false;
            url = null;
            ssl = false;
            protocol = null;
            user = null;
            while(reader.hasNext()) {
                final String name = reader.nextName();
                switch(name) {
                    case "url":
                        url = this.readNext(name, reader);
                        if(StringUtils.isBlank(url)) {
                            // A bookmark without a URL is unusable; skip the whole entry.
                            skip = true;
                        }
                        break;
                    case "ssl":
                        ssl = reader.nextBoolean();
                        break;
                    case "user":
                        user = this.readNext(name, reader);
                        break;
                    case "type":
                        final String type = this.readNext(name, reader);
                        switch(type) {
                            case "google_cloud_storage":
                                protocol = protocols.forType(Protocol.Type.googlestorage);
                                break;
                            case "gdrive":
                                protocol = protocols.forType(Protocol.Type.googledrive);
                                break;
                            default:
                                protocol = protocols.forName(type);
                        }
                        break;
                    default:
                        log.warn(String.format("Ignore property %s", name));
                        reader.skipValue();
                        break;
                }
            }
            reader.endObject();
            if(!skip && protocol != null && StringUtils.isNotBlank(user)) {
                if(ssl) {
                    // Upgrade plain schemes to their TLS variants when the bookmark requests SSL.
                    switch(protocol.getType()) {
                        case ftp:
                            protocol = protocols.forScheme(Scheme.ftps);
                            break;
                        case dav:
                            protocol = protocols.forScheme(Scheme.davs);
                            break;
                    }
                }
                try {
                    this.add(new HostParser(protocols, protocol).get(url));
                }
                catch(HostParserException e) {
                    log.warn(e);
                }
            }
        }
        reader.endArray();
    }
    catch(IllegalStateException | IOException e) {
        throw new LocalAccessDeniedException(e.getMessage(), e);
    }
}
@Test(expected = AccessDeniedException.class)
public void testParseNotFound() {
    // Parsing a non-existent file must surface an AccessDeniedException.
    new NetDrive2BookmarkCollection().parse(new ProtocolFactory(Collections.emptySet()), new Local(System.getProperty("java.io.tmpdir"), "f"));
}
/**
 * Unloads the plugin backing the removed jar file, unless the removed jar is
 * an external plugin shadowing a bundled one — bundled plugins are never
 * unloaded on behalf of an external file.
 */
@Override
public void pluginJarRemoved(BundleOrPluginFileDetails bundleOrPluginFileDetails) {
    GoPluginDescriptor existingDescriptor = registry.getPluginByIdOrFileName(null, bundleOrPluginFileDetails.file().getName());
    // Nothing registered under this file name — nothing to remove.
    if (existingDescriptor == null) {
        return;
    }
    try {
        LOGGER.info("Plugin removal starting: {}", bundleOrPluginFileDetails.file());
        // An external jar that shadows a bundled plugin must not unload the bundled plugin.
        if (existingDescriptor.isBundledPlugin() && !bundleOrPluginFileDetails.isBundledPlugin()) {
            LOGGER.info("External Plugin file '{}' having same name as bundled plugin file has been removed. Refusing to unload bundled plugin with id: '{}'", bundleOrPluginFileDetails.file(), existingDescriptor.id());
            return;
        }
        removePlugin(existingDescriptor.bundleDescriptor());
    } finally {
        LOGGER.info("Plugin removal finished: {}", bundleOrPluginFileDetails.file());
    }
}
@Test
void shouldNotRemoveBundledPluginExternalPluginJarRemovedWithSameId() {
    String pluginId = "plugin-id";
    String pluginJarFileName = "plugin-file-name";
    File pluginJarFile = mock(File.class);
    when(pluginJarFile.getName()).thenReturn(pluginJarFileName);
    when(pluginJarFile.getAbsoluteFile()).thenReturn(new File(pluginJarFileName));
    // Registry resolves the file name to a descriptor flagged bundled (the 'true'
    // argument — presumably the bundled flag, verify against getPluginDescriptor),
    // while the removed jar itself is external.
    final GoPluginDescriptor oldPluginDescriptor = getPluginDescriptor(pluginId, "1.0", null, null, true, null);
    GoPluginBundleDescriptor oldPluginBundleDescriptor = new GoPluginBundleDescriptor(oldPluginDescriptor);
    when(registry.getPluginByIdOrFileName(null, pluginJarFileName)).thenReturn(oldPluginDescriptor);
    DefaultPluginJarChangeListener spy = spy(listener);
    spy.pluginJarRemoved(new BundleOrPluginFileDetails(pluginJarFile, false, pluginWorkDir));
    // The bundled plugin must remain loaded.
    verify(registry, never()).unloadPlugin(oldPluginBundleDescriptor);
    verify(pluginLoader, never()).unloadPlugin(oldPluginBundleDescriptor);
}
/**
 * Finishes the stream: closes the local buffer file, uploads it to the
 * object store, records the resulting ETag, and deletes the local file.
 * Idempotent once the closed flag is set.
 *
 * @throws IOException If the upload fails
 */
@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    mLocalOutputStream.close();
    String path = getUploadPath();
    try {
        // Generate the object metadata by setting server side encryption, md5 checksum, the file
        // length, and encoding as octet stream since no assumptions are made about the file type
        ObjectMetadata meta = new ObjectMetadata();
        if (mSseEnabled) {
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if (mHash != null) {
            meta.setContentMD5(new String(Base64.encode(mHash.digest())));
        }
        meta.setContentLength(mFile.length());
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        // Generate the put request and wait for the transfer manager to complete the upload
        PutObjectRequest putReq = new PutObjectRequest(mBucketName, path, mFile).withMetadata(meta);
        mContentHash = getTransferManager().upload(putReq).waitForUploadResult().getETag();
    } catch (Exception e) {
        LOG.error("Failed to upload {}", path, e);
        throw new IOException(e);
    } finally {
        // Delete the temporary file on the local machine if the transfer manager completed the
        // upload or if the upload failed.
        if (!mFile.delete()) {
            LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
        }
        // Set the closed flag, close can be retried until mFile.delete is called successfully
        mClosed = true;
    }
}
@Test
public void close() throws Exception {
    // Closing must record the upload's content hash and delete the local temp file.
    mStream.close();
    assertEquals(mContentHash, mStream.getContentHash().get());
    Mockito.verify(mFile).delete();
}
/**
 * Periodic task: reports upstream health when a routing table is currently
 * available, then always reports config age.
 */
@Override
public void run() {
    tableSupplier.get().ifPresent(this::reportHealth);
    reportConfigAge();
}
@Test
public void upstream_metrics() {
    // Mix of healthy/unhealthy servers across several upstreams, with duplicates,
    // to exercise per-target up/down/unknown counting.
    List<ServerGroup.Server> servers = List.of(
        new ServerGroup.Server("gateway.prod.music.vespa.us-east-2.prod", "10.78.114.166:4080", true),
        new ServerGroup.Server("gateway.prod.music.vespa.us-east-2.prod", "10.78.115.68:4080", true),
        new ServerGroup.Server("qrs.prod.music.vespa.us-east-2.prod", "10.78.114.166:4080", true),
        new ServerGroup.Server("qrs.prod.music.vespa.us-east-2.prod", "10.78.115.68:4080", true),
        new ServerGroup.Server("qrs.prod.music.vespa.us-east-2.prod", "10.78.114.166:4080", false),
        new ServerGroup.Server("qrs.prod.music.vespa.us-east-2.prod", "10.78.115.68:4080", false),
        new ServerGroup.Server("qrs.prod.music.vespa.us-east-2.prod", "10.78.114.166:4080", false),
        new ServerGroup.Server("qrs.prod.music.vespa.us-east-2.prod", "10.78.115.68:4080", false),
        new ServerGroup.Server("donbot.vespa.us-east-2.prod", "10.201.8.47:4080", true),
        new ServerGroup.Server("donbot.vespa.us-east-2.prod", "10.201.14.46:4080", false),
        new ServerGroup.Server("appearing-in-routing.not.us-east-2.prod", "10.201.14.50:4080", false)
    );
    healthService.setStatus(new ServerGroup(servers));
    reporter.run();
    assertEquals(2D, getMetric(NginxMetricsReporter.UPSTREAM_UP_METRIC, dimensionsOf(target0)), Double.MIN_VALUE);
    assertEquals(0D, getMetric(NginxMetricsReporter.UPSTREAM_DOWN_METRIC, dimensionsOf(target0)), Double.MIN_VALUE);
    assertEquals(0D, getMetric(NginxMetricsReporter.UPSTREAM_UNKNOWN_METRIC, dimensionsOf(target0)), Double.MIN_VALUE);
    assertEquals(2L, getMetric(NginxMetricsReporter.UPSTREAM_UP_METRIC, dimensionsOf(target1)), Double.MIN_VALUE);
    assertEquals(4L, getMetric(NginxMetricsReporter.UPSTREAM_DOWN_METRIC, dimensionsOf(target1)), Double.MIN_VALUE);
    assertEquals(0L, getMetric(NginxMetricsReporter.UPSTREAM_UNKNOWN_METRIC, dimensionsOf(target1)), Double.MIN_VALUE);
    assertEquals(1D, getMetric(NginxMetricsReporter.UPSTREAM_UP_METRIC, dimensionsOf(target2)), Double.MIN_VALUE);
    assertEquals(1D, getMetric(NginxMetricsReporter.UPSTREAM_DOWN_METRIC, dimensionsOf(target2)), Double.MIN_VALUE);
    assertEquals(0D, getMetric(NginxMetricsReporter.UPSTREAM_UNKNOWN_METRIC, dimensionsOf(target2)), Double.MIN_VALUE);
    // If the application appears in routing table - but not in health check cache yet
    assertEquals(0D, getMetric(NginxMetricsReporter.UPSTREAM_UP_METRIC, dimensionsOf(target3)), Double.MIN_VALUE);
    assertEquals(0D, getMetric(NginxMetricsReporter.UPSTREAM_DOWN_METRIC, dimensionsOf(target3)), Double.MIN_VALUE);
    assertEquals(1D, getMetric(NginxMetricsReporter.UPSTREAM_UNKNOWN_METRIC, dimensionsOf(target3)), Double.MIN_VALUE);
    // If the application does not appear in routing table - but still appears in cache
    assertNull(getMetric(NginxMetricsReporter.UPSTREAM_UP_METRIC, dimensionsOf(target4)));
    assertNull(getMetric(NginxMetricsReporter.UPSTREAM_DOWN_METRIC, dimensionsOf(target4)));
    assertNull(getMetric(NginxMetricsReporter.UPSTREAM_UNKNOWN_METRIC, dimensionsOf(target4)));
    assertNull(getMetric(NginxMetricsReporter.UPSTREAM_UP_METRIC, dimensionsOf(target5)));
    assertNull(getMetric(NginxMetricsReporter.UPSTREAM_DOWN_METRIC, dimensionsOf(target5)));
    assertEquals(1D, getMetric(NginxMetricsReporter.UPSTREAM_UNKNOWN_METRIC, dimensionsOf(target5)), Double.MIN_VALUE);
}
/**
 * Resets all session state to its initial values and marks the instance
 * as initialized.
 */
@Override
public void init() {
    isInitialized = true;
    networkIsInitialized = false;
    beforeHandoverMode = false;
    clientClosed = false;
    singleThreaded = new Object();
    dataListListenerStack = new ArrayDeque<>();
}
@Test
void testAsyncSectionedRenderer() throws IOException, InterruptedException {
    // Renders a data list containing control characters and JSON-special characters
    // and compares the event trace verbatim (escapes must survive rendering).
    StringDataList dataList = createDataListWithStrangeStrings();
    TestRenderer renderer = new TestRenderer();
    renderer.init();
    String str = render(renderer, dataList);
    assertEquals(" beginResponse beginList[f\\o\"o, [b/a\br, f\f\no\ro\tbar\u0005]] dataf\\o\"o beginList[b/a\br, " +
        "f\f\no\ro\tbar\u0005] datab/a\br dataf\f\no\ro\tbar\u0005 endList[b/a\br, f\f\no\ro\tbar\u0005] endList[f\\o\"o, [b/a\br, f\f\no\ro\tbar\u0005]] endResponse", str);
}
/**
 * Static factory: creates a RetryTransformer decorating reactive sources
 * with the given {@code retry}.
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
@Test(expected = StackOverflowError.class)
public void shouldNotRetryUsingSingleStackOverFlow() {
    // Errors (as opposed to Exceptions) must propagate without any retry attempt
    // and without being counted in the retry metrics.
    RetryConfig config = retryConfig();
    Retry retry = Retry.of("testName", config);
    given(helloWorldService.returnHelloWorld())
        .willThrow(new StackOverflowError("BAM!"));
    Single.fromCallable(helloWorldService::returnHelloWorld)
        .compose(RetryTransformer.of(retry))
        .test();
    then(helloWorldService).should().returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
}
/**
 * Toggles user-defined index mode. The mode can only change while the store
 * has made no allocations (no meters and no meter ids).
 *
 * @param enable requested mode
 * @return the effective mode, which may differ from the requested one
 */
@Override
public boolean userDefinedIndexMode(boolean enable) {
    if (meters.isEmpty() && meterIdGenerators.isEmpty()) {
        userDefinedIndexMode = enable;
    } else {
        // Fix: the two concatenated literals previously rendered "didalready"
        // (missing space between "did" and "already").
        log.warn("Unable to {} user defined index mode as store did " +
            "already some allocations", enable ? "activate" : "deactivate");
    }
    return userDefinedIndexMode;
}
@Test
public void testInvalidDisableUserDefinedIndex() {
    // Once allocations exist the mode cannot be disabled, so it stays enabled (true).
    testStoreMeterInUserDefinedIndexMode();
    assertTrue(meterStore.userDefinedIndexMode(false));
}
/**
 * Maps a checkpoint id onto one of the configured allocation base
 * directories. The sign bit is masked off so the index is non-negative.
 */
@Override
public File allocationBaseDirectory(long checkpointId) {
    final int nonNegativeIndex = ((int) checkpointId) & Integer.MAX_VALUE;
    return selectAllocationBaseDirectory(nonNegativeIndex % allocationBaseDirs.length);
}
@Test
void allocationBaseDir() {
    // Checkpoint ids must map round-robin onto the allocation base folders.
    for (int i = 0; i < 10; ++i) {
        assertThat(directoryProvider.allocationBaseDirectory(i))
            .isEqualTo(allocBaseFolders[i % allocBaseFolders.length]);
    }
}
/**
 * Parses the given decimal string into a DecimalNum.
 *
 * @param val decimal string representation; must not be "NaN" (any case)
 * @return the parsed value
 * @throws NumberFormatException if {@code val} is NaN or not a valid decimal
 */
public static DecimalNum valueOf(String val) {
    // BigDecimal-backed values have no NaN representation; reject explicitly.
    // Improvement: carry a message so callers see why parsing failed.
    if (val.equalsIgnoreCase("NAN")) {
        throw new NumberFormatException("NaN cannot be represented as a DecimalNum");
    }
    return new DecimalNum(val);
}
@Test(expected = NumberFormatException.class)
public void testValueOfForFloatNaNShouldThrowNumberFormatException() {
    // NaN has no decimal representation, so valueOf must reject it.
    DecimalNum.valueOf(Float.NaN);
}
/**
 * Scans the excess redundancy map and schedules invalidation of excess
 * replicas whose timeout has elapsed. Work per invocation is bounded by
 * excessRedundancyTimeoutCheckLimit. Runs under the namesystem write lock.
 */
void processTimedOutExcessBlocks() {
    if (excessRedundancyMap.size() == 0) {
        return;
    }
    namesystem.writeLock();
    long now = Time.monotonicNow();
    int processed = 0;
    try {
        Iterator<Map.Entry<String, LightWeightHashSet<Block>>> iter =
            excessRedundancyMap.getExcessRedundancyMap().entrySet().iterator();
        while (iter.hasNext() && processed < excessRedundancyTimeoutCheckLimit) {
            Map.Entry<String, LightWeightHashSet<Block>> entry = iter.next();
            String datanodeUuid = entry.getKey();
            LightWeightHashSet<Block> blocks = entry.getValue();
            // Sort blocks by timestamp in ascending order (oldest first).
            // NOTE(review): the original comment said "descending", but
            // Comparator.comparingLong sorts in natural (ascending) order.
            List<ExcessBlockInfo> sortedBlocks = blocks.stream()
                .filter(block -> block instanceof ExcessBlockInfo)
                .map(block -> (ExcessBlockInfo) block)
                .sorted(Comparator.comparingLong(ExcessBlockInfo::getTimeStamp))
                .collect(Collectors.toList());
            for (ExcessBlockInfo excessBlockInfo : sortedBlocks) {
                if (processed >= excessRedundancyTimeoutCheckLimit) {
                    break;
                }
                processed++;
                // If the datanode doesn't have any excess block that has exceeded the timeout,
                // can exit this loop.
                if (now <= excessBlockInfo.getTimeStamp() + excessRedundancyTimeout) {
                    break;
                }
                BlockInfo blockInfo = excessBlockInfo.getBlockInfo();
                BlockInfo bi = blocksMap.getStoredBlock(blockInfo);
                if (bi == null || bi.isDeleted()) {
                    continue;
                }
                Iterator<DatanodeStorageInfo> iterator = blockInfo.getStorageInfos();
                while (iterator.hasNext()) {
                    DatanodeStorageInfo datanodeStorageInfo = iterator.next();
                    DatanodeDescriptor datanodeDescriptor = datanodeStorageInfo.getDatanodeDescriptor();
                    // Only act on the storage belonging to this datanode entry and in NORMAL state.
                    if (datanodeDescriptor.getDatanodeUuid().equals(datanodeUuid)
                        && datanodeStorageInfo.getState().equals(State.NORMAL)) {
                        final Block b = getBlockOnStorage(blockInfo, datanodeStorageInfo);
                        if (!containsInvalidateBlock(datanodeDescriptor, b)) {
                            addToInvalidates(b, datanodeDescriptor);
                            LOG.debug("Excess block timeout ({}, {}) is added to invalidated.",
                                b, datanodeDescriptor);
                        }
                        excessBlockInfo.setTimeStamp();
                        break;
                    }
                }
            }
        }
    } finally {
        namesystem.writeUnlock("processTimedOutExcessBlocks");
        LOG.info("processTimedOutExcessBlocks {} msecs.", (Time.monotonicNow() - now));
    }
}
@Test(timeout = 360000)
public void testProcessTimedOutExcessBlocks() throws IOException, InterruptedException, TimeoutException {
    Configuration config = new HdfsConfiguration();
    // Bump up replication interval.
    config.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 10000);
    // Set the excess redundancy block timeout.
    long timeOut = 60L;
    config.setLong(DFSConfigKeys.DFS_NAMENODE_EXCESS_REDUNDANCY_TIMEOUT_SEC_KEY, timeOut);
    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
    final Semaphore semaphore = new Semaphore(0);
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build()) {
        DistributedFileSystem fs = cluster.getFileSystem();
        BlockManager blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
        cluster.waitActive();
        final DataNodeFaultInjector injector = new DataNodeFaultInjector() {
            @Override
            public void delayDeleteReplica() {
                // Lets wait for the remove replica process.
                try {
                    semaphore.acquire(1);
                } catch (InterruptedException e) {
                    // ignore.
                }
            }
        };
        DataNodeFaultInjector.set(injector);
        // Create file.
        Path path = new Path("/testfile");
        DFSTestUtil.createFile(fs, path, 1024, (short) 3, 0);
        DFSTestUtil.waitReplication(fs, path, (short) 3);
        LocatedBlock lb = DFSTestUtil.getAllBlocks(fs, path).get(0);
        ExtendedBlock extendedBlock = lb.getBlock();
        DatanodeInfo[] loc = lb.getLocations();
        assertEquals(3, loc.length);
        // Set replication as 2, to choose excess.
        fs.setReplication(path, (short) 2);
        // Check excessRedundancyMap and invalidateBlocks size as 1.
        assertEquals(1, blockManager.getExcessBlocksCount());
        assertEquals(1, blockManager.getPendingDeletionBlocksCount());
        DataNode excessDn = Arrays.stream(loc).
            filter(datanodeInfo -> blockManager.getExcessSize4Testing(
                datanodeInfo.getDatanodeUuid()) > 0)
            .map(datanodeInfo -> cluster.getDataNode(datanodeInfo.getIpcPort()))
            .findFirst()
            .orElse(null);
        // Schedule blocks for deletion at excessDn.
        assertEquals(1, blockManager.computeInvalidateWork(1));
        // Check excessRedundancyMap size as 1.
        assertEquals(1, blockManager.getExcessBlocksCount());
        // Check invalidateBlocks size as 0.
        assertEquals(0, blockManager.getPendingDeletionBlocksCount());
        assertNotNull(excessDn);
        // NameNode will ask datanode to delete replicas in heartbeat response.
        cluster.triggerHeartbeats();
        // Wait for the datanode to process any block deletions
        // that have already been asynchronously queued.
        DataNode finalExcessDn = excessDn;
        GenericTestUtils.waitFor(
            () -> cluster.getFsDatasetTestUtils(finalExcessDn).getPendingAsyncDeletions() == 1,
            100, 1000);
        // Restart the datanode.
        int ipcPort = excessDn.getDatanodeId().getIpcPort();
        MiniDFSCluster.DataNodeProperties dataNodeProperties = cluster.stopDataNode(
            excessDn.getDatanodeId().getXferAddr());
        assertTrue(cluster.restartDataNode(dataNodeProperties, true));
        semaphore.release(1);
        cluster.waitActive();
        // Check replica is exists in excessDn.
        excessDn = cluster.getDataNode(ipcPort);
        assertNotNull(cluster.getFsDatasetTestUtils(excessDn).fetchReplica(extendedBlock));
        assertEquals(0, cluster.getFsDatasetTestUtils(excessDn).getPendingAsyncDeletions());
        // Verify excess redundancy blocks have not timed out.
        blockManager.processTimedOutExcessBlocks();
        assertEquals(0, blockManager.getPendingDeletionBlocksCount());
        // Verify excess redundancy block time out.
        Thread.sleep(timeOut * 1000);
        blockManager.processTimedOutExcessBlocks();
        // Check excessRedundancyMap and invalidateBlocks size as 1.
        assertEquals(1, blockManager.getExcessSize4Testing(excessDn.getDatanodeUuid()));
        assertEquals(1, blockManager.getExcessBlocksCount());
        assertEquals(1, blockManager.getPendingDeletionBlocksCount());
        // Schedule blocks for deletion.
        assertEquals(1, blockManager.computeInvalidateWork(1));
        cluster.triggerHeartbeats();
        // Make it resume the removeReplicaFromMem method.
        semaphore.release(1);
        // Wait for the datanode in the cluster to process any block
        // deletions that have already been asynchronously queued
        cluster.waitForDNDeletions();
        // Trigger immediate deletion report.
        cluster.triggerDeletionReports();
        // The replica num should be 2.
        assertEquals(2, DFSTestUtil.getAllBlocks(fs, path).get(0).getLocations().length);
        assertEquals(0, blockManager.getExcessBlocksCount());
    } finally {
        DataNodeFaultInjector.set(oldInjector);
    }
}
/**
 * Runs one scheduling-edit pass: refreshes configuration if needed, then
 * applies container-based preempt-or-kill over a snapshot of the cluster
 * resources. Elapsed time is logged at debug level.
 */
@Override
public synchronized void editSchedule() {
    updateConfigIfNeeded();
    final long startTimeMs = clock.getTime();
    final CSQueue rootQueue = scheduler.getRootQueue();
    // Work on a clone so the computation sees a stable resource snapshot.
    final Resource clusterSnapshot = Resources.clone(scheduler.getClusterResource());
    containerBasedPreemptOrKill(rootQueue, clusterSnapshot);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total time used=" + (clock.getTime() - startTimeMs) + " ms.");
    }
}
@Test
public void testPerQueueDisablePreemptionBroadHierarchical() {
    int[][] qData = new int[][] {
        // /    A              D              G
        //      B    C         E    F         H    I
        {1000, 350, 150, 200, 400, 200, 200, 250, 100, 150 }, // abs
        {1000,1000,1000,1000,1000,1000,1000,1000,1000,1000 }, // maxCap
        {1000, 400, 200, 200, 400, 250, 150, 200, 150, 50 }, // used
        { 50, 0, 0, 0, 50, 0, 50, 0, 0, 0 }, // pending
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // reserved
        // appA appB appC appD appE appF
        { 6, 2, 1, 1, 2, 1, 1, 2, 1, 1 }, // apps
        { -1, -1, 1, 1, -1, 1, 1, -1, 1, 1 }, // req granulrity
        { 3, 2, 0, 0, 2, 0, 0, 2, 0, 0 }, // subqueues
    };
    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
    policy.editSchedule();
    // queueF(appD) wants resources, Verify that resources come from queueE(appC)
    // because it's a sibling and queueB(appA) because queueA is over capacity.
    verify(mDisp, times(27)).handle(argThat(new IsPreemptionRequestFor(appA)));
    verify(mDisp, times(23)).handle(argThat(new IsPreemptionRequestFor(appC)));
    // Need to call setup() again to reset mDisp
    setup();
    // Turn off preemption for queueB(appA)
    conf.setPreemptionDisabled(QUEUE_A_QUEUE_B, true);
    ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
    policy2.editSchedule();
    // Now that queueB(appA) is not preemptable, verify that resources come
    // from queueE(appC)
    verify(mDisp, times(50)).handle(argThat(new IsPreemptionRequestFor(appC)));
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
    setup();
    // Turn off preemption for two of the 3 queues with over-capacity.
    conf.setPreemptionDisabled(QUEUE_D_QUEUE_E, true);
    conf.setPreemptionDisabled(QUEUE_A_QUEUE_B, true);
    ProportionalCapacityPreemptionPolicy policy3 = buildPolicy(qData);
    policy3.editSchedule();
    // Verify that the request was starved out even though queueH(appE) is
    // over capacity. This is because queueG (queueH's parent) is NOT
    // overcapacity.
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); // queueB
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); // queueC
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC))); // queueE
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appE))); // queueH
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appF))); // queueI
}
/** Creates an empty registry. */
public Registry() {}
@Test
public void registry() {
    // Expected canonical order, regardless of registration order.
    List<TestServer> masters = ImmutableList.of(new ServerC(), new ServerB(), new ServerA());
    List<TestServer[]> permutations = new ArrayList<>();
    computePermutations(masters.toArray(new TestServer[0]), 0, permutations);
    // Make sure that the registry orders the masters independently of the order in which they
    // are registered.
    for (TestServer[] permutation : permutations) {
        Registry<TestServer, Void> registry = new Registry<>();
        for (TestServer server : permutation) {
            registry.add(server.getClass(), server);
        }
        Assert.assertEquals(masters, registry.getServers());
    }
}
/**
 * Lists in-progress large-file uploads matching the given file (exact key)
 * or directory (parent path), paging through all results and returning them
 * sorted by upload timestamp with the most recent first.
 *
 * @param file File or directory to match uploads against
 * @return Matching unfinished uploads, newest first
 * @throws BackgroundException On B2 API or I/O failure
 */
public List<B2FileInfoResponse> find(final Path file) throws BackgroundException {
    if(log.isDebugEnabled()) {
        log.debug(String.format("Finding multipart uploads for %s", file));
    }
    try {
        final List<B2FileInfoResponse> uploads = new ArrayList<B2FileInfoResponse>();
        // This operation lists in-progress multipart uploads. An in-progress multipart upload is a
        // multipart upload that has been initiated, using the Initiate Multipart Upload request, but has
        // not yet been completed or aborted.
        String startFileId = null;
        do {
            final B2ListFilesResponse chunk;
            chunk = session.getClient().listUnfinishedLargeFiles(
                fileid.getVersionId(containerService.getContainer(file)), startFileId, null);
            for(B2FileInfoResponse upload : chunk.getFiles()) {
                if(file.isDirectory()) {
                    // Directory query: match uploads whose parent equals the queried path.
                    final Path parent = new Path(containerService.getContainer(file), upload.getFileName(),
                        EnumSet.of(Path.Type.file)).getParent();
                    if(new SimplePathPredicate(parent).test(file)) {
                        uploads.add(upload);
                    }
                }
                else {
                    // File query: match the exact object key.
                    if(StringUtils.equals(upload.getFileName(), containerService.getKey(file))) {
                        uploads.add(upload);
                    }
                }
            }
            if(log.isInfoEnabled()) {
                log.info(String.format("Found %d previous multipart uploads for %s", uploads.size(), file));
            }
            // Pagination token; null once the listing is exhausted.
            startFileId = chunk.getNextFileId();
        }
        while(startFileId != null);
        if(log.isInfoEnabled()) {
            for(B2FileInfoResponse upload : uploads) {
                log.info(String.format("Found multipart upload %s for %s", upload, file));
            }
        }
        // Uploads are listed in the order they were started, with the oldest one first
        uploads.sort(new Comparator<B2FileInfoResponse>() {
            @Override
            public int compare(final B2FileInfoResponse o1, final B2FileInfoResponse o2) {
                return o1.getUploadTimestamp().compareTo(o2.getUploadTimestamp());
            }
        });
        // Reverse the ascending sort so the newest upload comes first.
        Collections.reverse(uploads);
        return uploads;
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService(fileid).map("Upload {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
@Test
public void testFind() throws Exception {
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path file = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    // Starting a large-file upload must make it discoverable as an in-progress upload.
    final B2StartLargeFileResponse startResponse = session.getClient().startLargeFileUpload(
        new B2VersionIdProvider(session).getVersionId(bucket), file.getName(), null, Collections.emptyMap());
    assertEquals(1, new B2LargeUploadPartService(session, new B2VersionIdProvider(session)).find(file).size());
    // Clean up the started upload.
    session.getClient().cancelLargeFileUpload(startResponse.getFileId());
}
/**
 * Installs the global predicate used to decide which throwables are traced.
 *
 * @param exceptionPredicate non-null predicate over throwables
 */
public static void setExceptionPredicate(Predicate<Throwable> exceptionPredicate) {
    // Rejects null up front (AssertUtil presumably throws IllegalArgumentException
    // — consistent with the accompanying test).
    AssertUtil.notNull(exceptionPredicate, "exception predicate must not be null");
    Tracer.exceptionPredicate = exceptionPredicate;
}
@Test(expected = IllegalArgumentException.class)
public void testNull3() {
    // A null predicate must be rejected.
    Tracer.setExceptionPredicate(null);
}
/**
 * Resolves the last type parameter of {@code supertype} as seen through
 * {@code genericContext}.
 *
 * @deprecated delegates unchanged to {@code Types.resolveLastTypeParameter};
 *     call that method directly.
 */
@Deprecated
public static Type resolveLastTypeParameter(Type genericContext, Class<?> supertype) throws IllegalStateException {
    return Types.resolveLastTypeParameter(genericContext, supertype);
}
@Test
void lastTypeFromAnonymous() throws Exception {
    // The anonymous subclass pins the type argument, making it resolvable at runtime.
    Parameterized<?> instance = new Parameterized<Reader>() {};
    Type last = resolveLastTypeParameter(instance.getClass(), Parameterized.class);
    assertThat(last).isEqualTo(Reader.class);
}
/**
 * Copies the EPTL value from the change request onto the matching
 * document-status record and persists it.
 */
public void setEptl(ChangeRequest request) {
    final DocumentStatus status = fetchStatus(request);
    status.setEptl(request.getEptl());
    repository.save(status);
}
@Test
public void setEptl() throws Exception {
    // Prepare an existing status record resolved through the mocked repository.
    final DocumentStatus dummyDocumentStatus = new DocumentStatus();
    dummyDocumentStatus.setId(1L);
    dummyDocumentStatus.setDocType(DocTypeType.NL_RIJBEWIJS);
    dummyDocumentStatus.setPseudonym(pseudonym);
    dummyDocumentStatus.setSequenceNo("SSSSSSSSSSSSS");
    dummyDocumentStatus.setStatus(StatusType.GEACTIVEERD);
    dummyDocumentStatus.setStatusMu(MUStatusType.ACTIEF);
    when(bsnkPseudonymDecryptorMock.decryptEp(anyString(), anyString(), anyString())).thenReturn(pseudonym);
    when(documentStatusRepositoryMock.findByPseudonymAndDocTypeAndSequenceNo(anyString(), any(DocTypeType.class), anyString())).thenReturn(Optional.of(dummyDocumentStatus));
    final String eptl = "TL-ENCRYPTED";
    final ChangeRequest request = new ChangeRequest();
    request.setDocType(dummyDocumentStatus.getDocType());
    request.setEpsc(encrypted);
    request.setSequenceNo(dummyDocumentStatus.getSequenceNo());
    request.setEptl(eptl);
    documentStatusService.setEptl(request);
    // The saved entity must carry the new EPTL value.
    ArgumentCaptor<DocumentStatus> argument = ArgumentCaptor.forClass(DocumentStatus.class);
    Mockito.verify(documentStatusRepositoryMock).save(argument.capture());
    assertEquals(eptl, argument.getValue().getEptl());
}
/**
 * Resolves a host name to its textual IP address.
 *
 * @param fqdn host name to resolve
 * @return the IP address string, or {@code null} when resolution fails
 */
public static String resolveHost2Address(String fqdn) {
    try {
        return InetAddress.getByName(fqdn).getHostAddress();
    } catch (UnknownHostException e) {
        logger.error("UnknownHostException " + fqdn, e);
        return null;
    }
}
@Test
public void testResolveHost2ip() {
    // Smoke test only: prints the resolved address, asserts nothing
    // (resolution depends on network availability).
    String ip = NetUtils.resolveHost2Address("www.google.ca");
    System.out.println("ip = " + ip);
}
/**
 * Creates an unconfigured JmsIO read transform whose message mapper converts
 * each JMS message into a {@code JmsRecord}, copying all JMS headers and
 * message properties.
 */
public static Read<JmsRecord> read() {
    return new AutoValue_JmsIO_Read.Builder<JmsRecord>()
        .setMaxNumRecords(Long.MAX_VALUE)
        .setCoder(SerializableCoder.of(JmsRecord.class))
        .setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
        .setRequiresDeduping(false)
        .setMessageMapper(
            new MessageMapper<JmsRecord>() {
                @Override
                public JmsRecord mapMessage(Message message) throws Exception {
                    // Assumes every message is a TextMessage; other JMS message
                    // types will fail here with a ClassCastException.
                    TextMessage textMessage = (TextMessage) message;
                    Map<String, Object> properties = new HashMap<>();
                    @SuppressWarnings("rawtypes")
                    Enumeration propertyNames = textMessage.getPropertyNames();
                    while (propertyNames.hasMoreElements()) {
                        String propertyName = (String) propertyNames.nextElement();
                        properties.put(propertyName, textMessage.getObjectProperty(propertyName));
                    }
                    return new JmsRecord(
                        textMessage.getJMSMessageID(),
                        textMessage.getJMSTimestamp(),
                        textMessage.getJMSCorrelationID(),
                        textMessage.getJMSReplyTo(),
                        textMessage.getJMSDestination(),
                        textMessage.getJMSDeliveryMode(),
                        textMessage.getJMSRedelivered(),
                        textMessage.getJMSType(),
                        textMessage.getJMSExpiration(),
                        textMessage.getJMSPriority(),
                        properties,
                        textMessage.getText());
                }
            })
        .build();
}
/**
 * Verifies that finalizing a checkpoint concurrently with further reads is
 * safe: half the messages are acknowledged while the other half are still
 * being consumed on another thread, and no exception must surface.
 */
@Test
public void testCheckpointMarkSafety() throws Exception {
    final int messagesToProcess = 100;
    // we are using no prefetch here
    // prefetch is an ActiveMQ feature: to make efficient use of network resources the broker
    // utilizes a 'push' model to dispatch messages to consumers. However, in the case of our
    // test, it means that we can have some latency between the receiveNoWait() method used by
    // the consumer and the prefetch buffer populated by the broker. Using a prefetch to 0 means
    // that the consumer will poll for message, which is exactly what we want for the test.
    // We are also sending message acknowledgements synchronously to ensure that they are
    // processed before any subsequent assertions.
    Connection connection = connectionFactoryWithSyncAcksAndWithoutPrefetch.createConnection(USERNAME, PASSWORD);
    connection.start();
    Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
    // Fill the queue with messages
    MessageProducer producer = session.createProducer(session.createQueue(QUEUE));
    for (int i = 0; i < messagesToProcess; i++) {
        producer.send(session.createTextMessage("test " + i));
    }
    producer.close();
    session.close();
    connection.close();
    Function jmsMessageAck = getJmsMessageAck(this.connectionFactoryClass);
    // create a JmsIO.Read with a decorated ConnectionFactory which will introduce a delay in
    // sending
    // acknowledgements - this should help uncover threading issues around checkpoint management.
    JmsIO.Read spec =
        JmsIO.read()
            .withConnectionFactory(withSlowAcks(connectionFactoryWithSyncAcksAndWithoutPrefetch, jmsMessageAck))
            .withUsername(USERNAME)
            .withPassword(PASSWORD)
            .withQueue(QUEUE);
    JmsIO.UnboundedJmsSource source = new JmsIO.UnboundedJmsSource(spec);
    JmsIO.UnboundedJmsReader reader = source.createReader(PipelineOptionsFactory.create(), null);
    // start the reader and move to the first record
    assertTrue(reader.start());
    // consume half the messages (NB: start already consumed the first message)
    for (int i = 0; i < (messagesToProcess / 2) - 1; i++) {
        assertTrue(reader.advance());
    }
    // the messages are still pending in the queue (no ACK yet)
    assertEquals(messagesToProcess, count(QUEUE));
    // we finalize the checkpoint for the already-processed messages while simultaneously consuming
    // the remainder of
    // messages from the queue
    Thread runner =
        new Thread(
            () -> {
                try {
                    for (int i = 0; i < messagesToProcess / 2; i++) {
                        assertTrue(reader.advance());
                    }
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            });
    runner.start();
    reader.getCheckpointMark().finalizeCheckpoint();
    // Concurrency issues would cause an exception to be thrown before this method exits, failing
    // the test
    runner.join();
}
/**
 * Sorts the catalogue tree by catalogue id in the requested direction.
 *
 * @param catalogueTree the (possibly nested) catalogue tree to sort
 * @param sortTypeEnum  ascending or descending
 * @return the recursively sorted tree
 */
@Override
public List<Catalogue> sort(List<Catalogue> catalogueTree, SortTypeEnum sortTypeEnum) {
    log.debug("sort catalogue tree based on id. catalogueTree: {}, sortTypeEnum: {}", catalogueTree, sortTypeEnum);
    // Delegate to the shared recursive sorter so children are ordered too.
    return recursionSortCatalogues(catalogueTree, sortTypeEnum);
}
/** Ascending sort must order every level of the tree by id. */
@Test
public void sortAscTest() {
    SortTypeEnum direction = SortTypeEnum.ASC;
    List<Catalogue> tree = Lists.newArrayList();

    // Root with id 1 and deliberately unordered children 3, 2.
    Catalogue rootOne = new Catalogue();
    rootOne.setId(1);
    Catalogue childTwo = new Catalogue();
    childTwo.setId(2);
    Catalogue childThree = new Catalogue();
    childThree.setId(3);
    rootOne.setChildren(Lists.newArrayList(childThree, childTwo));

    // Root with id 4 and deliberately unordered children 7, 6.
    Catalogue rootFour = new Catalogue();
    rootFour.setId(4);
    Catalogue childSeven = new Catalogue();
    childSeven.setId(7);
    Catalogue childSix = new Catalogue();
    childSix.setId(6);
    rootFour.setChildren(Lists.newArrayList(childSeven, childSix));

    tree.add(rootFour);
    tree.add(rootOne);

    /*
     input:
        -- 4
           -- 7
           -- 6
        -- 1
           -- 3
           -- 2
     output:
        -- 1
           -- 2
           -- 3
        -- 4
           -- 6
           -- 7
     */
    List<Catalogue> sorted = catalogueTreeSortDefaultStrategyTest.sort(tree, direction);
    List<Integer> sortedIds = CategoryTreeSortStrategyTestUtils.breadthTraverse(sorted);
    assertEquals(Lists.newArrayList(1, 4, 2, 3, 6, 7), sortedIds);
}
/**
 * SQL function {@code bing_tile_zoom_level}: decodes the packed tile and
 * returns its zoom level.
 */
@Description("Given a Bing tile, returns zoom level of the tile")
@ScalarFunction("bing_tile_zoom_level")
@SqlType(StandardTypes.TINYINT)
public static long bingTileZoomLevel(@SqlType(BingTileType.NAME) long input)
{
    BingTile tile = BingTile.decode(input);
    return tile.getZoomLevel();
}
/** bing_tile_zoom_level should report the quadkey length as the zoom level. */
@Test
public void testBingTileZoomLevel() {
    // A 3-character quadkey is a zoom-3 tile; a 15-character one is zoom 15.
    assertFunction("bing_tile_zoom_level(bing_tile('213'))", TINYINT, (byte) 3);
    assertFunction("bing_tile_zoom_level(bing_tile('123030123010121'))", TINYINT, (byte) 15);
}
/**
 * Returns the distinct elements of {@code left} that also occur in
 * {@code right}, preserving the encounter order of {@code left}.
 *
 * @param left  first array of values
 * @param right second array of values
 * @return the ordered intersection, or {@code null} when either input is null
 */
@Udf
public <T> List<T> intersect(
    @UdfParameter(description = "First array of values") final List<T> left,
    @UdfParameter(description = "Second array of values") final List<T> right) {
  if (left == null || right == null) {
    return null;
  }
  // LinkedHashSet de-duplicates while preserving left's ordering.
  final Set<T> ordered = Sets.newLinkedHashSet(left);
  final Set<T> lookup = Sets.newHashSet(right);
  ordered.retainAll(lookup);
  return Lists.newArrayList(ordered);
}
/** A null first argument must yield a null result rather than throwing. */
@Test
public void shouldReturnNullForNullExceptionArray() {
  final List<String> right = Arrays.asList("foo");
  final List<String> actual = udf.intersect(null, right);
  assertThat(actual, is(nullValue()));
}
/**
 * Builds a separator-delimited fully qualified table name from the optional
 * catalog and schema plus the mandatory table name.
 *
 * <p>When a catalog is present but the schema is absent, the schema separator
 * is still emitted (e.g. {@code catalog..table}) so the catalog keeps its
 * positional meaning.
 *
 * @param catalog   optional catalog name
 * @param schema    optional schema name
 * @param tableName table name, always appended
 * @param separator character placed between the name parts
 * @return the composed qualified name
 */
public static String composeFullyQualifiedTableName(String catalog, String schema, String tableName, char separator) {
    StringBuilder result = new StringBuilder();
    if (stringHasValue(catalog)) {
        result.append(catalog).append(separator);
    }
    if (stringHasValue(schema)) {
        result.append(schema).append(separator);
    } else if (result.length() > 0) {
        // Keep the catalog's slot: catalog..table
        result.append(separator);
    }
    return result.append(tableName).toString();
}
/** Without a catalog the result is just schema + separator + table. */
@Test
void testNoCatalog() {
    String qualified = StringUtility.composeFullyQualifiedTableName(null, "schema", "table", '.');
    assertEquals("schema.table", qualified);
}
/**
 * Reads an ENUM/SET/STRING binlog value for the given column definition.
 *
 * <p>The real column type and length are packed together in the two meta
 * bytes; see https://bugs.mysql.com/bug.php?id=37426 for the unpacking scheme
 * applied below.
 */
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    int type = columnDef.getColumnMeta() >> 8;
    int length = columnDef.getColumnMeta() & 0xff;
    // unpack type & length, see https://bugs.mysql.com/bug.php?id=37426.
    if (0x30 != (type & 0x30)) {
        // Bits 4-5 of the type byte carry the high bits of the real length.
        length += ((type & 0x30) ^ 0x30) << 4;
        type |= 0x30;
    }
    switch (MySQLBinaryColumnType.valueOf(type)) {
        case ENUM:
            return readEnumValue(length, payload);
        case SET:
            // SET values here are read as a single raw byte.
            return payload.getByteBuf().readByte();
        case STRING:
            return new MySQLBinaryString(payload.readStringFixByBytes(readActualLength(length, payload)));
        default:
            throw new UnsupportedSQLOperationException(MySQLBinaryColumnType.valueOf(type).toString());
    }
}
/** ENUM with meta length 1 is decoded by reading one byte as the enum index. */
@Test
void assertReadEnumValueWithMeta1() {
    // High meta byte = column type (ENUM), low byte = stored length (1).
    columnDef.setColumnMeta((MySQLBinaryColumnType.ENUM.getValue() << 8) + 1);
    when(payload.readInt1()).thenReturn(1);
    assertThat(new MySQLStringBinlogProtocolValue().read(columnDef, payload), is(1));
}
/**
 * Validates that a transfer of {@code size} bytes does not exceed the
 * channel's configured maximum payload.
 *
 * @param channel channel whose payload limit applies
 * @param size    data length about to be transferred
 * @throws IOException when the size exceeds the configured limit
 */
protected static void checkPayload(Channel channel, long size) throws IOException {
    int payload = getPayload(channel);
    if (isOverPayload(payload, size)) {
        ExceedPayloadLimitException e = new ExceedPayloadLimitException(
                "Data length too large: " + size + ", max payload: " + payload + ", channel: " + channel);
        // Log before propagating so the oversized transfer is always recorded.
        logger.error(TRANSPORT_EXCEED_PAYLOAD_LIMIT, "", "", e.getMessage(), e);
        throw e;
    }
}
/**
 * Exercises the three-argument checkPayload overload around the payload limit.
 *
 * <p>NOTE(review): the try blocks below pass silently when no exception is
 * thrown; consider adding fail() after each checkPayload call so a missing
 * ExceedPayloadLimitException would be detected.
 */
@Test
void testCheckProviderPayload() throws Exception {
    Channel channel = mock(Channel.class);
    given(channel.getUrl()).willReturn(URL.valueOf("dubbo://1.1.1.1"));
    AbstractCodec.checkPayload(channel, 1024 * 1024 + 1, 1024 * 1024);
    try {
        AbstractCodec.checkPayload(channel, 1024 * 1024, 1024 * 1024);
    } catch (IOException expected) {
        assertThat(
                expected.getMessage(),
                allOf(containsString("Data length too large: "), containsString("max payload: " + 1024 * 1024)));
    }
    try {
        AbstractCodec.checkPayload(channel, 0, 15 * 1024 * 1024);
    } catch (IOException expected) {
        assertThat(
                expected.getMessage(),
                allOf(
                        containsString("Data length too large: "),
                        containsString("max payload: " + 8 * 1024 * 1024)));
    }
    verify(channel, VerificationModeFactory.atLeastOnce()).getUrl();
}
/**
 * Builds a {@code CreateIsmShardKeyAndSortKeyParDoFn} from the cloud spec.
 *
 * <p>Only the ENCODING entry of the spec is consulted; it must decode to an
 * {@code IsmRecordCoder}.
 */
@Override
public ParDoFn create(
    PipelineOptions options,
    CloudObject cloudUserFn,
    List<SideInputInfo> sideInputInfos,
    TupleTag<?> mainOutputTag,
    Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices,
    DataflowExecutionContext<?> executionContext,
    DataflowOperationContext operationContext)
    throws Exception {
  CloudObject encoding = CloudObject.fromSpec(Structs.getObject(cloudUserFn, PropertyNames.ENCODING));
  Coder<?> coder = CloudObjects.coderFromCloudObject(encoding);
  checkState(
      coder instanceof IsmRecordCoder,
      "Expected to received an instanceof an IsmRecordCoder but got %s",
      coder);
  return new CreateIsmShardKeyAndSortKeyParDoFn((IsmRecordCoder<?>) coder);
}
/**
 * The factory's ParDoFn must re-key an element by the hash of its key (shard
 * key) and nest the original key/window as the sort key alongside the value.
 */
@Test
public void testConversionOfRecord() throws Exception {
    ParDoFn parDoFn =
        new CreateIsmShardKeyAndSortKeyDoFnFactory()
            .create(
                null /* pipeline options */,
                CloudObject.fromSpec(
                    ImmutableMap.of(
                        PropertyNames.OBJECT_TYPE_NAME,
                        "CreateIsmShardKeyAndSortKeyDoFn",
                        PropertyNames.ENCODING,
                        createIsmRecordEncoding())),
                null /* side input infos */,
                null /* main output tag */,
                null /* output tag to receiver index */,
                null /* exection context */,
                null /* operation context */);
    List<Object> outputReceiver = new ArrayList<>();
    parDoFn.startBundle(outputReceiver::add);
    parDoFn.processElement(valueInGlobalWindow(KV.of(42, 43)));
    // Rebuild the coder to compute the expected shard-key hash.
    IsmRecordCoder<?> coder =
        (IsmRecordCoder) CloudObjects.coderFromCloudObject(CloudObject.fromSpec(createIsmRecordEncoding()));
    assertThat(
        outputReceiver,
        contains(
            valueInGlobalWindow(
                KV.of(
                    coder.hash(ImmutableList.of(42)) /* hash key */,
                    KV.of(KV.of(42, GlobalWindow.INSTANCE) /* sort key */, 43 /* value */)))));
}
/**
 * Fails unless the actual map contains at least the given key/value entries;
 * other entries may also be present.
 *
 * @param k0   first expected key
 * @param v0   value expected under {@code k0}
 * @param rest further expected entries as alternating keys and values
 * @return an {@code Ordered} that can additionally assert entry order
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
    @Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
  // accumulateMap validates the varargs shape and builds the expected entries.
  return containsAtLeastEntriesIn(accumulateMap("containsAtLeast", k0, v0, rest));
}
/**
 * Regression test for https://github.com/google/truth/issues/468: an expected
 * null value must be reported as a wrong value, not break the failure message.
 */
@Test
public void containsAtLeastWrongValueWithNull() {
    // Test for https://github.com/google/truth/issues/468
    ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2, "march", 3);
    expectFailureWhenTestingThat(actual).containsAtLeast("jan", 1, "march", null);
    assertFailureKeys(
        "keys with wrong values",
        "for key",
        "expected value",
        "but got value",
        "---",
        "expected to contain at least",
        "but was");
    assertFailureValue("for key", "march");
    assertFailureValue("expected value", "null");
    assertFailureValue("but got value", "3");
}
/** Fails if the subject map is empty. */
public final void isNotEmpty() {
  boolean empty = checkNotNull(actual).isEmpty();
  if (empty) {
    failWithoutActual(simpleFact("expected not to be empty"));
  }
}
/** A one-entry map must satisfy isNotEmpty. */
@Test
public void isNotEmpty() {
  ImmutableMap<Integer, Integer> nonEmptyMap = ImmutableMap.of(1, 5);
  assertThat(nonEmptyMap).isNotEmpty();
}
/**
 * Encodes one value per configured dimension into a single z-order long.
 *
 * @param input the values to interleave, one per dimension
 * @return the interleaved z-address packed into a long
 */
public long encodeToLong(List<Integer> input) {
    // Rejects inputs whose combined bit length cannot fit in a long.
    checkEncodeInputValidity(input, Long.SIZE);
    return zOrderByteAddressToLong(encodeToByteArray(input));
}
/** Encoding more total bits than fit in a long must be rejected. */
@Test
public void testZOrderOverLong() {
    // 5 dimensions x 16 bits = 80 bits, more than Long.SIZE.
    List<Integer> bitPositions = ImmutableList.of(16, 16, 16, 16, 16);
    int totalBits = bitPositions.stream().mapToInt(Integer::intValue).sum();
    ZOrder zOrder = new ZOrder(bitPositions);
    List<Integer> values = ImmutableList.of(20456, 20456, 20456, 20456, 20456);
    try {
        zOrder.encodeToLong(values);
        fail("Expected test to fail: total bits to encode is larger than the size of a long.");
    } catch (IllegalArgumentException e) {
        String expectedMessage = format("The z-address type specified is not large enough to hold %d values with a total of %d bits.", bitPositions.size(), totalBits);
        assertEquals(e.getMessage(), expectedMessage, format("Expected exception message '%s' to match '%s'", e.getMessage(), expectedMessage));
    }
}
/**
 * Creates a {@code ListOptions} instance with every option at its default.
 *
 * @return a fresh default-configured ListOptions
 */
public static ListOptions defaults() {
    ListOptions options = new ListOptions();
    return options;
}
/** Default options must be non-recursive. */
@Test
public void defaults() throws IOException {
    ListOptions defaultOptions = ListOptions.defaults();
    assertEquals(false, defaultOptions.isRecursive());
}
/**
 * Finds the query metadata (active host, standbys, partition) for the task
 * that would own the given key in the given store, partitioning the key with
 * the default stream partitioner built from {@code keySerializer}.
 *
 * @param storeName     state store to query
 * @param key           key to locate
 * @param keySerializer serializer used to map the key to a partition
 * @param <K>           key type
 * @return the key's query metadata
 * @throws IllegalArgumentException when named topologies are in use; callers
 *         must then use the overload that takes a topology name
 */
public synchronized <K> KeyQueryMetadata getKeyQueryMetadataForKey(final String storeName,
                                                                   final K key,
                                                                   final Serializer<K> keySerializer) {
    Objects.requireNonNull(keySerializer, "keySerializer can't be null");
    if (topologyMetadata.hasNamedTopologies()) {
        // Fixed message: the original string concatenation was missing spaces
        // between segments ("keySerializer)method", "thataccepts").
        throw new IllegalArgumentException("Cannot invoke the getKeyQueryMetadataForKey(storeName, key, keySerializer)"
                                               + " method when using named topologies, please use the overload that"
                                               + " accepts a topologyName parameter to identify the correct store");
    }
    return getKeyQueryMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer));
}
/** Looking up a key in an unknown store must yield null metadata. */
@Test
public void shouldReturnNullOnGetWithKeyWhenStoreDoesntExist() {
    final KeyQueryMetadata metadata = metadataState.getKeyQueryMetadataForKey("not-a-store", "key", Serdes.String().serializer());
    assertNull(metadata);
}
/**
 * Sentinel provider-side filter: wraps the dubbo invocation in interface- and
 * method-level Sentinel entries so flow rules apply, traces business and RPC
 * exceptions, and delegates to the configured fallback when blocked.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    // Get origin caller.
    String origin = DubboAdapterGlobalConfig.getOriginParser().parse(invoker, invocation);
    if (null == origin) {
        origin = "";
    }
    Entry interfaceEntry = null;
    Entry methodEntry = null;
    try {
        String prefix = DubboAdapterGlobalConfig.getDubboProviderPrefix();
        String methodResourceName = getMethodResourceName(invoker, invocation, prefix);
        String interfaceName = getInterfaceName(invoker, prefix);
        // Bind the caller origin to the current context before entering resources.
        ContextUtil.enter(methodResourceName, origin);
        interfaceEntry = SphU.entry(interfaceName, ResourceTypeConstants.COMMON_RPC, EntryType.IN);
        methodEntry = SphU.entry(methodResourceName, ResourceTypeConstants.COMMON_RPC, EntryType.IN, invocation.getArguments());
        Result result = invoker.invoke(invocation);
        if (result.hasException()) {
            Throwable e = result.getException();
            // Record common exception.
            Tracer.traceEntry(e, interfaceEntry);
            Tracer.traceEntry(e, methodEntry);
        }
        return result;
    } catch (BlockException e) {
        // Blocked by a Sentinel rule: answer with the configured provider fallback.
        return DubboAdapterGlobalConfig.getProviderFallback().handle(invoker, invocation, e);
    } catch (RpcException e) {
        Tracer.traceEntry(e, interfaceEntry);
        Tracer.traceEntry(e, methodEntry);
        throw e;
    } finally {
        // Exit entries in reverse order of entry, then clear the context.
        if (methodEntry != null) {
            methodEntry.exit(1, invocation.getArguments());
        }
        if (interfaceEntry != null) {
            interfaceEntry.exit();
        }
        ContextUtil.exit();
    }
}
/**
 * Happy path: the filter must build the Sentinel invocation tree (checked
 * inside the stubbed invoke) and leave no Sentinel context behind afterwards.
 */
@Test
public void testInvoke() {
    final String originApplication = "consumerA";
    final Invoker invoker = mock(Invoker.class);
    when(invoker.getInterface()).thenReturn(DemoService.class);
    final Invocation invocation = mock(Invocation.class);
    Method method = DemoService.class.getMethods()[0];
    when(invocation.getMethodName()).thenReturn(method.getName());
    when(invocation.getParameterTypes()).thenReturn(method.getParameterTypes());
    when(invocation.getAttachment(DubboUtils.DUBBO_APPLICATION_KEY, "")).thenReturn(originApplication);
    final Result result = mock(Result.class);
    when(result.hasException()).thenReturn(false);
    // Assert the Sentinel context/entry structure at the moment of invocation.
    when(invoker.invoke(invocation)).thenAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            verifyInvocationStructure(originApplication, invoker, invocation);
            return result;
        }
    });
    filter.invoke(invoker, invocation);
    verify(invoker).invoke(invocation);
    // The filter must clean up the Sentinel context on exit.
    Context context = ContextUtil.getContext();
    assertNull(context);
}
/**
 * Resolves the temp directory from the {@code sonar.path.temp} property,
 * defaulting to the relative path {@code temp} when unset.
 *
 * @param p properties to read the location from
 * @return the configured or default temp directory
 */
static File resolveTempDir(Properties p) {
    final String configured = p.getProperty("sonar.path.temp");
    return new File(configured != null ? configured : "temp");
}
/** An absolute sonar.path.temp value must be returned verbatim. */
@Test
public void resolveTempDir_reads_absolute_temp_dir_location_from_sonar_path_temp() throws IOException {
    File configuredDir = temporaryFolder.newFolder();
    Properties props = new Properties();
    props.put("sonar.path.temp", configuredDir.getAbsolutePath());
    File resolved = Shutdowner.resolveTempDir(props);
    assertThat(resolved).isEqualTo(configuredDir);
}
/**
 * Renders the chat history into one text prompt: all system messages first,
 * then the user/assistant turns, terminated by the assistant prompt marker.
 *
 * @param messages ordered chat messages
 * @return the rendered prompt, trimmed
 */
public String toPrompt(List<Message> messages) {
    final String systemSection = messages.stream()
        .filter(m -> m.getMessageType() == MessageType.SYSTEM)
        .map(Message::getContent)
        .collect(Collectors.joining("\n"));
    final String conversationSection = messages.stream()
        .filter(m -> {
            final MessageType type = m.getMessageType();
            return type == MessageType.USER || type == MessageType.ASSISTANT;
        })
        .map(this::messageToString)
        .collect(Collectors.joining("\n"));
    return String.format("%s%n%n%s%n%s", systemSection, conversationSection, assistantPrompt).trim();
}
/** A lone system message must become the prompt verbatim (after trimming). */
@Test
public void testSystemMessageList() {
    String content = "this is a LLM prompt";
    SystemMessage systemMessage = new SystemMessage(content);
    Assert.assertEquals(content, converter.toPrompt(List.of(systemMessage)));
}
/**
 * Two entries are equal exactly when they have the same runtime class and
 * wrap equal key data.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null) {
        return false;
    }
    if (getClass() != o.getClass()) {
        return false;
    }
    CachedQueryEntry<?, ?> other = (CachedQueryEntry<?, ?>) o;
    return keyData.equals(other.keyData);
}
/** An unrelated type must never be equal to an entry. */
@Test
public void testEquals_givenOtherIsDifferentClass_thenReturnFalse() {
    CachedQueryEntry entry = createEntry("key");
    Object other = new Object();
    assertFalse(entry.equals(other));
}
/**
 * Records a failed load, tracking both the cumulative load time and the
 * failure latency distribution.
 *
 * @param loadTime load duration in nanoseconds
 */
@Override
public void recordLoadFailure(long loadTime) {
    totalLoadTime.add(loadTime);
    loadFailure.update(loadTime, TimeUnit.NANOSECONDS);
}
/** Recording a load failure must increment the failure timer count. */
@Test
public void loadFailure() {
    stats.recordLoadFailure(256);
    long failureCount = registry.timer(PREFIX + ".loads-failure").getCount();
    assertThat(failureCount).isEqualTo(1);
}
/** Clears the alarm at the current wall-clock time on behalf of the user. */
@Override
public AlarmInfo clear(Alarm alarm, User user) throws ThingsboardException {
    long clearTs = System.currentTimeMillis();
    return clear(alarm, clearTs, user);
}
/**
 * Clearing an alarm must add an alarm comment, log the ALARM_CLEAR action and
 * delegate to the subscription service exactly once.
 */
@Test
public void testClear() throws ThingsboardException {
    var alarm = new Alarm();
    alarm.setAcknowledged(true);
    // Subscription service reports a successful clear.
    when(alarmSubscriptionService.clearAlarm(any(), any(), anyLong(), any()))
            .thenReturn(AlarmApiCallResult.builder().successful(true).cleared(true).alarm(new AlarmInfo()).build());
    service.clear(alarm, new User(new UserId(UUID.randomUUID())));
    verify(alarmCommentService, times(1)).saveAlarmComment(any(), any(), any());
    verify(logEntityActionService, times(1)).logEntityAction(any(), any(), any(), any(), eq(ActionType.ALARM_CLEAR), any());
    verify(alarmSubscriptionService, times(1)).clearAlarm(any(), any(), anyLong(), any());
}
/**
 * Requests new partition counts (and optional replica assignments) for the
 * given topics.
 *
 * @param newPartitions requested partition changes, keyed by topic name
 * @param options       request options (timeout, quota-retry behavior)
 * @return per-topic futures tracking each topic's outcome
 */
@Override
public CreatePartitionsResult createPartitions(final Map<String, NewPartitions> newPartitions,
                                               final CreatePartitionsOptions options) {
    final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>(newPartitions.size());
    final CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection(newPartitions.size());
    newPartitions.forEach((topicName, request) -> {
        final List<List<Integer>> rawAssignments = request.assignments();
        final List<CreatePartitionsAssignment> assignments;
        if (rawAssignments == null) {
            // No explicit assignments: let the broker place replicas.
            assignments = null;
        } else {
            assignments = rawAssignments.stream()
                .map(brokerIds -> new CreatePartitionsAssignment().setBrokerIds(brokerIds))
                .collect(Collectors.toList());
        }
        topics.add(new CreatePartitionsTopic()
            .setName(topicName)
            .setCount(request.totalCount())
            .setAssignments(assignments));
        futures.put(topicName, new KafkaFutureImpl<>());
    });
    if (!topics.isEmpty()) {
        final long now = time.milliseconds();
        final long deadline = calcDeadlineMs(now, options.timeoutMs());
        final Call call = getCreatePartitionsCall(options, futures, topics, Collections.emptyMap(), now, deadline);
        runnable.call(call, now);
    }
    return new CreatePartitionsResult(new HashMap<>(futures));
}
/**
 * With quota-violation retries enabled, a topic that keeps hitting
 * THROTTLING_QUOTA_EXCEEDED must eventually fail with a
 * ThrottlingQuotaExceededException once the default API timeout elapses.
 */
@Test
public void testCreatePartitionsRetryThrottlingExceptionWhenEnabledUntilRequestTimeOut() throws Exception {
    long defaultApiTimeout = 60000;
    MockTime time = new MockTime();
    try (AdminClientUnitTestEnv env = mockClientEnv(time, AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, String.valueOf(defaultApiTimeout))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // First response: topic2 is throttled, topic3 already exists.
        env.kafkaClient().prepareResponse(
            expectCreatePartitionsRequestWithTopics("topic1", "topic2", "topic3"),
            prepareCreatePartitionsResponse(1000,
                createPartitionsTopicResult("topic1", Errors.NONE),
                createPartitionsTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
                createPartitionsTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
        // Retry for topic2 is throttled again and never succeeds.
        env.kafkaClient().prepareResponse(
            expectCreatePartitionsRequestWithTopics("topic2"),
            prepareCreatePartitionsResponse(1000,
                createPartitionsTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED)));
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("topic1", NewPartitions.increaseTo(1));
        counts.put("topic2", NewPartitions.increaseTo(2));
        counts.put("topic3", NewPartitions.increaseTo(3));
        CreatePartitionsResult result = env.adminClient().createPartitions(
            counts, new CreatePartitionsOptions().retryOnQuotaViolation(true));
        // Wait until the prepared attempts have consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
            "Failed awaiting CreatePartitions requests");
        // Wait until the next request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
            "Failed awaiting next CreatePartitions request");
        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);
        assertNull(result.values().get("topic1").get());
        ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"),
            ThrottlingQuotaExceededException.class);
        assertEquals(0, e.throttleTimeMs());
        TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class);
    }
}
/**
 * Checks whether a character sequence is null or has zero length.
 *
 * @param val sequence to test; may be {@code null}
 * @return {@code true} when {@code val} is {@code null} or empty
 */
public static boolean isEmpty( CharSequence val ) {
    if ( val == null ) {
        return true;
    }
    return val.length() == 0;
}
/** Null and "" are empty; any non-empty string is not. */
@Test
public void testIsEmpty() {
    assertTrue( Utils.isEmpty( (String) null ) );
    assertTrue( Utils.isEmpty( "" ) );
    assertFalse( Utils.isEmpty( "test" ) );
}
/**
 * Merges per-shard query results of a SELECT into a single result.
 *
 * <p>A single result set that needs no aggregation rewrite streams straight
 * through; otherwise column labels are indexed on the statement context, the
 * merged result is built and then decorated (e.g. for pagination).
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    boolean singlePlainResult = 1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext);
    if (singlePlainResult) {
        return new IteratorStreamMergedResult(queryResults);
    }
    Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
    SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
    selectStatementContext.setIndexes(columnLabelIndexMap);
    MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
    return decorate(queryResults, selectStatementContext, mergedResult);
}
/**
 * An Oracle SELECT whose WHERE uses a row_id >= bound over a subquery exposing
 * a TOP projection must merge into a RowNumberDecoratorMergedResult wrapping
 * an order-by stream merge.
 */
@Test
void assertBuildOrderByStreamMergedResultWithOracleLimit() throws SQLException {
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "Oracle"));
    final ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    // WHERE row_id >= 1 — the Oracle pagination predicate.
    WhereSegment whereSegment = mock(WhereSegment.class);
    BinaryOperationExpression binaryOperationExpression = mock(BinaryOperationExpression.class);
    when(binaryOperationExpression.getLeft()).thenReturn(new ColumnSegment(0, 0, new IdentifierValue("row_id")));
    when(binaryOperationExpression.getRight()).thenReturn(new LiteralExpressionSegment(0, 0, 1L));
    when(binaryOperationExpression.getOperator()).thenReturn(">=");
    when(whereSegment.getExpr()).thenReturn(binaryOperationExpression);
    // FROM (subquery) — inner select supplies the "row_id" TOP projection alias.
    SubqueryTableSegment subqueryTableSegment = mock(SubqueryTableSegment.class);
    SubquerySegment subquerySegment = mock(SubquerySegment.class);
    SelectStatement subSelectStatement = mock(MySQLSelectStatement.class);
    ProjectionsSegment subProjectionsSegment = mock(ProjectionsSegment.class);
    TopProjectionSegment topProjectionSegment = mock(TopProjectionSegment.class);
    when(topProjectionSegment.getAlias()).thenReturn("row_id");
    when(subProjectionsSegment.getProjections()).thenReturn(Collections.singletonList(topProjectionSegment));
    when(subSelectStatement.getProjections()).thenReturn(subProjectionsSegment);
    when(subquerySegment.getSelect()).thenReturn(subSelectStatement);
    when(subqueryTableSegment.getSubquery()).thenReturn(subquerySegment);
    // Outer Oracle statement: ORDER BY index 1 DESC over the subquery.
    OracleSelectStatement selectStatement = (OracleSelectStatement) buildSelectStatement(new OracleSelectStatement());
    selectStatement.setOrderBy(new OrderBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    selectStatement.setFrom(subqueryTableSegment);
    selectStatement.setWhere(whereSegment);
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), null, selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class));
    assertThat(actual, instanceOf(RowNumberDecoratorMergedResult.class));
    assertThat(((RowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(OrderByStreamMergedResult.class));
}
/**
 * Maps an IRIS column definition to a SeaTunnel {@code Column}.
 *
 * <p>Each IRIS type family (numeric, character, temporal, binary) is
 * translated to the corresponding SeaTunnel type; unknown types raise a
 * conversion error.
 *
 * @param typeDefine column metadata as read from IRIS
 * @return the converted physical column
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    Long typeDefineLength = typeDefine.getLength();
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .columnLength(typeDefineLength)
                    .scale(typeDefine.getScale())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String irisDataType = typeDefine.getDataType().toUpperCase();
    // CHAR/BINARY lengths default to 1 when the definition has no usable length.
    long charOrBinaryLength =
            Objects.nonNull(typeDefineLength) && typeDefineLength > 0 ? typeDefineLength : 1;
    switch (irisDataType) {
        case IRIS_NULL:
            builder.dataType(BasicType.VOID_TYPE);
            break;
        case IRIS_BIT:
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        // Exact numeric family: fall back to default precision/scale when unset.
        case IRIS_NUMERIC:
        case IRIS_MONEY:
        case IRIS_SMALLMONEY:
        case IRIS_NUMBER:
        case IRIS_DEC:
        case IRIS_DECIMAL:
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.dataType(decimalType);
            builder.columnLength(Long.valueOf(decimalType.getPrecision()));
            builder.scale(decimalType.getScale());
            break;
        case IRIS_INT:
        case IRIS_INTEGER:
        case IRIS_MEDIUMINT:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case IRIS_ROWVERSION:
        case IRIS_BIGINT:
        case IRIS_SERIAL:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case IRIS_TINYINT:
            builder.dataType(BasicType.BYTE_TYPE);
            break;
        case IRIS_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case IRIS_FLOAT:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case IRIS_DOUBLE:
        case IRIS_REAL:
        case IRIS_DOUBLE_PRECISION:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        // Bounded character family: length-limited strings.
        case IRIS_CHAR:
        case IRIS_CHAR_VARYING:
        case IRIS_CHARACTER_VARYING:
        case IRIS_NATIONAL_CHAR:
        case IRIS_NATIONAL_CHAR_VARYING:
        case IRIS_NATIONAL_CHARACTER:
        case IRIS_NATIONAL_CHARACTER_VARYING:
        case IRIS_NATIONAL_VARCHAR:
        case IRIS_NCHAR:
        case IRIS_SYSNAME:
        case IRIS_VARCHAR2:
        case IRIS_VARCHAR:
        case IRIS_NVARCHAR:
        case IRIS_UNIQUEIDENTIFIER:
        case IRIS_GUID:
        case IRIS_CHARACTER:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(charOrBinaryLength);
            break;
        // Large text family: treated as max-length strings.
        case IRIS_NTEXT:
        case IRIS_CLOB:
        case IRIS_LONG_VARCHAR:
        case IRIS_LONG:
        case IRIS_LONGTEXT:
        case IRIS_MEDIUMTEXT:
        case IRIS_TEXT:
        case IRIS_LONGVARCHAR:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(Long.valueOf(Integer.MAX_VALUE));
            break;
        case IRIS_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case IRIS_TIME:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            break;
        case IRIS_DATETIME:
        case IRIS_DATETIME2:
        case IRIS_SMALLDATETIME:
        case IRIS_TIMESTAMP:
        case IRIS_TIMESTAMP2:
        case IRIS_POSIXTIME:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            break;
        // Bounded binary family.
        case IRIS_BINARY:
        case IRIS_BINARY_VARYING:
        case IRIS_RAW:
        case IRIS_VARBINARY:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(charOrBinaryLength);
            break;
        // Large binary family: treated as max-length byte arrays.
        case IRIS_LONGVARBINARY:
        case IRIS_BLOB:
        case IRIS_IMAGE:
        case IRIS_LONG_BINARY:
        case IRIS_LONG_RAW:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(Long.valueOf(Integer.MAX_VALUE));
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.IRIS, irisDataType, typeDefine.getName());
    }
    return builder.build();
}
/** An IRIS FLOAT column must map to SeaTunnel FLOAT. */
@Test
public void testConvertFloat() {
    BasicTypeDefine<Object> floatDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("float")
                    .dataType("float")
                    .build();
    Column converted = IrisTypeConverter.INSTANCE.convert(floatDefine);
    Assertions.assertEquals(floatDefine.getName(), converted.getName());
    Assertions.assertEquals(BasicType.FLOAT_TYPE, converted.getDataType());
    Assertions.assertEquals(floatDefine.getColumnType(), converted.getSourceType());
}
/**
 * Converts a Java bean into an {@code Entity} by reading its properties.
 *
 * @param bean source bean
 * @param <T>  bean type
 * @return entity populated from the bean's properties
 */
public static <T> Entity parse(T bean) {
    Entity entity = create(null);
    return entity.parseBean(bean);
}
/** Bean fields must be copied into the entity under their property names. */
@Test
public void parseTest() {
    User bean = new User();
    bean.setId(1);
    bean.setName("test");
    Entity parsed = Entity.create("testTable").parseBean(bean);
    assertEquals(Integer.valueOf(1), parsed.getInt("id"));
    assertEquals("test", parsed.getStr("name"));
}
/** Returns a deep copy backed by a clone of the underlying bit set. */
@Override
public FinishedTriggersBitSet copy() {
    BitSet clonedBits = (BitSet) bitSet.clone();
    return new FinishedTriggersBitSet(clonedBits);
}
/** copy() must not share the underlying BitSet with the original. */
@Test
public void testCopy() throws Exception {
    FinishedTriggersBitSet original = FinishedTriggersBitSet.emptyWithCapacity(10);
    assertThat(original.copy().getBitSet(), not(theInstance(original.getBitSet())));
}
/**
 * Searches execution logs with the given optional filters and returns one
 * page of matching entries.
 */
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "logs/search")
@Operation(tags = {"Logs"}, summary = "Search for logs")
public PagedResults<LogEntry> find(
    @Parameter(description = "A string filter") @Nullable @QueryValue(value = "q") String query,
    @Parameter(description = "The current page") @QueryValue(defaultValue = "1") int page,
    @Parameter(description = "The current page size") @QueryValue(defaultValue = "10") int size,
    @Parameter(description = "The sort of current page") @Nullable @QueryValue List<String> sort,
    @Parameter(description = "A namespace filter prefix") @Nullable @QueryValue String namespace,
    @Parameter(description = "A flow id filter") @Nullable @QueryValue String flowId,
    @Parameter(description = "A trigger id filter") @Nullable @QueryValue String triggerId,
    @Parameter(description = "The min log level filter") @Nullable @QueryValue Level minLevel,
    @Parameter(description = "The start datetime") @Nullable @Format("yyyy-MM-dd'T'HH:mm[:ss][.SSS][XXX]") @QueryValue ZonedDateTime startDate,
    @Parameter(description = "The end datetime") @Nullable @Format("yyyy-MM-dd'T'HH:mm[:ss][.SSS][XXX]") @QueryValue ZonedDateTime endDate
) {
    // Reject inverted/invalid date ranges before querying the repository.
    validateTimeline(startDate, endDate);
    return PagedResults.of(
        logRepository.find(PageableUtils.from(page, size, sort), query, tenantService.resolveTenant(), namespace, flowId, triggerId, minLevel, startDate, endDate)
    );
}
/**
 * /logs/search must return all stored entries, and a minLevel=INFO filter
 * must exclude entries below INFO.
 */
@SuppressWarnings("unchecked")
@Test
void find() {
    LogEntry log1 = logEntry(Level.INFO);
    LogEntry log2 = logEntry(Level.WARN);
    LogEntry log3 = logEntry(Level.DEBUG);
    logRepository.save(log1);
    logRepository.save(log2);
    logRepository.save(log3);
    // Unfiltered search returns all three entries.
    PagedResults<LogEntry> logs = client.toBlocking().retrieve(
        HttpRequest.GET("/api/v1/logs/search"),
        Argument.of(PagedResults.class, LogEntry.class)
    );
    assertThat(logs.getTotal(), is(3L));
    // The DEBUG entry is excluded when filtering at INFO.
    logs = client.toBlocking().retrieve(
        HttpRequest.GET("/api/v1/logs/search?minLevel=INFO"),
        Argument.of(PagedResults.class, LogEntry.class)
    );
    assertThat(logs.getTotal(), is(2L));
}
/**
 * Builds the materialized-data predicates for a partitioned Hive table: one
 * {@code TupleDomain<String>} of fixed partition-key values per partition,
 * plus the ordered list of partition column names.
 *
 * Steps visible in the body:
 *  1. rejects tables whose partition columns use an unsupported Hive type (NOT_SUPPORTED);
 *  2. resolves each partition key column to its engine {@code Type} via the TypeManager;
 *  3. loads all partition names from the metastore (TableNotFoundException when absent);
 *  4. for each partition name, parses the name into key/value pairs and validates that
 *     the pair count matches the partition-column count (HIVE_INVALID_METADATA) and that
 *     no key value is null (HIVE_INVALID_PARTITION_VALUE);
 *  5. converts each value with {@code parsePartitionValue} (time-zone aware) and collects
 *     the resulting fixed-value TupleDomains.
 */
public static MaterializedDataPredicates getMaterializedDataPredicates( SemiTransactionalHiveMetastore metastore, MetastoreContext metastoreContext, TypeManager typeManager, Table table, DateTimeZone timeZone) { List<Column> partitionColumns = table.getPartitionColumns(); for (Column partitionColumn : partitionColumns) { HiveType hiveType = partitionColumn.getType(); if (!hiveType.isSupportedType()) { throw new PrestoException( NOT_SUPPORTED, String.format("Unsupported Hive type %s found in partition keys of table %s.%s", hiveType, table.getDatabaseName(), table.getTableName())); } } List<HiveColumnHandle> partitionKeyColumnHandles = getPartitionKeyColumnHandles(table); Map<String, Type> partitionTypes = partitionKeyColumnHandles.stream() .collect(toImmutableMap(HiveColumnHandle::getName, column -> typeManager.getType(column.getTypeSignature()))); List<PartitionNameWithVersion> partitionNames = metastore.getPartitionNames(metastoreContext, table.getDatabaseName(), table.getTableName()) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(table.getDatabaseName(), table.getTableName()))); ImmutableList.Builder<TupleDomain<String>> partitionNamesAndValues = ImmutableList.builder(); for (PartitionNameWithVersion partitionName : partitionNames) { ImmutableMap.Builder<String, NullableValue> partitionNameAndValuesMap = ImmutableMap.builder(); Map<String, String> partitions = toPartitionNamesAndValues(partitionName.getPartitionName()); if (partitionColumns.size() != partitions.size()) { throw new PrestoException(HIVE_INVALID_METADATA, String.format( "Expected %d partition key values, but got %d", partitionColumns.size(), partitions.size())); } partitionTypes.forEach((name, type) -> { String value = partitions.get(name); if (value == null) { throw new PrestoException(HIVE_INVALID_PARTITION_VALUE, String.format("partition key value cannot be null for field: %s", name)); } partitionNameAndValuesMap.put(name, parsePartitionValue(name, value, type, timeZone)); }); 
TupleDomain<String> tupleDomain = TupleDomain.fromFixedValues(partitionNameAndValuesMap.build()); partitionNamesAndValues.add(tupleDomain); } return new MaterializedDataPredicates(partitionNamesAndValues.build(), partitionColumns.stream() .map(Column::getName) .collect(toImmutableList())); }
/**
 * Exercises partition-name parsing when some partition values are the Hive
 * "default dynamic partition" placeholder: registers four ds/category partitions
 * (two of which contain the placeholder) and expects the placeholder values to
 * materialize as {@code CAST(null AS varchar)} in the resulting predicates.
 */
@Test public void testMaterializedDataPredicatesWithNullPartitions() { TestingTypeManager typeManager = new TestingTypeManager(); TestingSemiTransactionalHiveMetastore testMetastore = TestingSemiTransactionalHiveMetastore.create(); List<String> keys = ImmutableList.of("ds", "category"); Column dsColumn = new Column("ds", HIVE_STRING, Optional.empty(), Optional.empty()); Column categoryColumn = new Column("category", HIVE_STRING, Optional.empty(), Optional.empty()); List<Column> partitionColumns = ImmutableList.of(dsColumn, categoryColumn); List<String> partitions = ImmutableList.of( "ds=2020-01-01/category=c1", "ds=2020-01-01/category=" + HIVE_DEFAULT_DYNAMIC_PARTITION, "ds=2020-01-02/category=c1", "ds=" + HIVE_DEFAULT_DYNAMIC_PARTITION + "/category=c2"); testMetastore.setPartitionNames(partitions); ImmutableList.Builder<List<TestingPartitionResult>> partitionResults = ImmutableList.builder(); partitionResults.add(ImmutableList.of( new TestingPartitionResult("ds", VARCHAR, "CAST('2020-01-01' AS varchar)"), new TestingPartitionResult("category", VARCHAR, "CAST('c1' AS varchar)"))); partitionResults.add(ImmutableList.of( new TestingPartitionResult("ds", VARCHAR, "CAST('2020-01-01' AS varchar)"), new TestingPartitionResult("category", VARCHAR, "CAST(null AS varchar)"))); partitionResults.add(ImmutableList.of( new TestingPartitionResult("ds", VARCHAR, "CAST('2020-01-02' AS varchar)"), new TestingPartitionResult("category", VARCHAR, "CAST('c1' AS varchar)"))); partitionResults.add(ImmutableList.of( new TestingPartitionResult("ds", VARCHAR, "CAST(null AS varchar)"), new TestingPartitionResult("category", VARCHAR, "CAST('c2' AS varchar)"))); MaterializedDataPredicates materializedDataPredicates = getMaterializedDataPredicates(testMetastore, metastoreContext, typeManager, getTable(partitionColumns), DateTimeZone.UTC); comparePredicates(materializedDataPredicates, keys, partitionResults.build()); }
/**
 * Test-only convenience: builds a JobGraph from the given StreamGraph using the
 * current thread's context class loader, no job id override, and a synchronous
 * (same-thread) executor.
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    StreamingJobGraphGenerator generator =
            new StreamingJobGraphGenerator(contextClassLoader, streamGraph, null, Runnable::run);
    return generator.createJobGraph();
}
/**
 * Sets a buffer timeout of -1 (disabled) on the environment, builds a small
 * streaming pipeline, generates the JobGraph, and asserts that every
 * non-chained output of every vertex carries the -1 buffer timeout.
 */
@Test void testDisablingBufferTimeoutWithPipelinedExchanges() { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setRuntimeMode(RuntimeExecutionMode.STREAMING); env.setBufferTimeout(-1); env.fromData(1, 2, 3).map(value -> value).print(); final JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph()); for (JobVertex vertex : jobGraph.getVertices()) { final StreamConfig streamConfig = new StreamConfig(vertex.getConfiguration()); for (NonChainedOutput output : streamConfig.getVertexNonChainedOutputs(this.getClass().getClassLoader())) { assertThat(output.getBufferTimeout()).isEqualTo(-1L); } } }
/**
 * REST endpoint (GET /page): returns a page of interface action resources.
 * Page number and page size are taken from the incoming {@code PageForm}.
 *
 * @param pageForm paging parameters (page number and page size)
 * @return the requested page of {@code BaseAction} rows wrapped in the API result envelope
 */
@ApiOperation(value = "่Žทๅ–ๆŽฅๅฃ่ต„ๆบๅˆ†้กตๅˆ—่กจ")
@GetMapping("/page")
public ApiPageResult<IPage<BaseAction>> page(PageForm pageForm) {
    // Diamond operator instead of the raw type `new Page(...)`: keeps the page
    // typed as Page<BaseAction> and avoids the unchecked-conversion warning.
    return ApiPageResult.success(baseActionService.page(new Page<>(pageForm.getPageNum(), pageForm.getPageSize())));
}
// NOTE(review): this test has an empty body and therefore verifies nothing about the
// /page endpoint. TODO: add assertions (e.g. invoke the controller with a PageForm and
// check the returned page contents/size).
@Test void testPage() { }
/**
 * Commits the files written by a Hive sink. Merges the per-file commit infos into
 * {@code PartitionUpdate}s (all files share the staging dir and overwrite flag of the
 * first commit info), then decides an update mode per partition:
 *  - unpartitioned table: OVERWRITE or APPEND for the single update, then stops;
 *  - partitioned table: OVERWRITE/APPEND when the partition already exists in the
 *    metastore, NEW otherwise (partition value count must match the table's partition
 *    columns, enforced by Preconditions).
 * Finally hands the updates to a {@code HiveCommitter}, timing the commit under the
 * "HIVE.SINK.commit" tracer scope. An empty commit list is logged and ignored.
 */
@Override public void finishSink(String dbName, String tableName, List<TSinkCommitInfo> commitInfos, String branch) { if (commitInfos.isEmpty()) { LOG.warn("No commit info on {}.{} after hive sink", dbName, tableName); return; } HiveTable table = (HiveTable) getTable(dbName, tableName); String stagingDir = commitInfos.get(0).getStaging_dir(); boolean isOverwrite = commitInfos.get(0).isIs_overwrite(); List<PartitionUpdate> partitionUpdates = commitInfos.stream() .map(TSinkCommitInfo::getHive_file_info) .map(fileInfo -> PartitionUpdate.get(fileInfo, stagingDir, table.getTableLocation())) .collect(Collectors.collectingAndThen(Collectors.toList(), PartitionUpdate::merge)); List<String> partitionColNames = table.getPartitionColumnNames(); for (PartitionUpdate partitionUpdate : partitionUpdates) { PartitionUpdate.UpdateMode mode; if (table.isUnPartitioned()) { mode = isOverwrite ? UpdateMode.OVERWRITE : UpdateMode.APPEND; partitionUpdate.setUpdateMode(mode); break; } else { List<String> partitionValues = toPartitionValues(partitionUpdate.getName()); Preconditions.checkState(partitionColNames.size() == partitionValues.size(), "Partition columns names size doesn't equal partition values size. %s vs %s", partitionColNames.size(), partitionValues.size()); if (hmsOps.partitionExists(table, partitionValues)) { mode = isOverwrite ? UpdateMode.OVERWRITE : UpdateMode.APPEND; } else { mode = PartitionUpdate.UpdateMode.NEW; } partitionUpdate.setUpdateMode(mode); } } HiveCommitter committer = new HiveCommitter( hmsOps, fileOps, updateExecutor, refreshOthersFeExecutor, table, new Path(stagingDir)); try (Timer ignored = Tracers.watchScope(EXTERNAL, "HIVE.SINK.commit")) { committer.commit(partitionUpdates); } }
/**
 * Calls finishSink twice: first with an empty commit-info list (logged and ignored),
 * then with a real commit info; the second call is expected to fail with
 * StarRocksConnectorException (declared via the @Test "expected" attribute).
 */
@Test(expected = StarRocksConnectorException.class) public void testFinishSink() { String stagingDir = "hdfs://127.0.0.1:10000/tmp/starrocks/queryid"; THiveFileInfo fileInfo = new THiveFileInfo(); fileInfo.setFile_name("myfile.parquet"); fileInfo.setPartition_path("hdfs://127.0.0.1:10000/tmp/starrocks/queryid/col1=2"); fileInfo.setRecord_count(10); fileInfo.setFile_size_in_bytes(100); TSinkCommitInfo tSinkCommitInfo = new TSinkCommitInfo(); tSinkCommitInfo.setStaging_dir(stagingDir); tSinkCommitInfo.setIs_overwrite(false); tSinkCommitInfo.setHive_file_info(fileInfo); hiveMetadata.finishSink("hive_db", "hive_table", Lists.newArrayList(), null); hiveMetadata.finishSink("hive_db", "hive_table", Lists.newArrayList(tSinkCommitInfo), null); }
/**
 * Validates the scheduler vcore allocation bounds read from the configuration.
 * The minimum must be strictly positive and no greater than the maximum;
 * otherwise a YarnRuntimeException describing both values is thrown.
 */
public static void validateVCores(Configuration conf) {
    final int minVcores = conf.getInt(
        YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
    final int maxVcores = conf.getInt(
        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
    final boolean invalid = minVcores <= 0 || minVcores > maxVcores;
    if (invalid) {
        throw new YarnRuntimeException("Invalid resource scheduler vcores"
            + " allocation configuration"
            + ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES
            + "=" + minVcores
            + ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES
            + "=" + maxVcores
            + ", min and max should be greater than 0"
            + ", max should be no smaller than min.");
    }
}
@Test public void testValidateVCores() { Map<String, String> configs = new HashMap(); configs.put(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, "1"); configs.put(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, "4"); Configuration config = CapacitySchedulerConfigGeneratorForTest .createConfiguration(configs); // there is no need for assertion, since there is no further method call // inside the tested code and in case of a valid configuration no exception // is thrown CapacitySchedulerConfigValidator.validateVCores(config); }
/** Removes every entry from the backing map, leaving this collection empty. */
@Override public void clear() { map.clear(); }
/**
 * Adds one value to the set, confirms it is present, then clears the set and
 * confirms the value is gone.
 */
@Test public void testClear() { ExtendedSet<TestValue> set = new ExtendedSet<>(Maps.newConcurrentMap()); TestValue val = new TestValue("foo", 1); assertTrue(set.add(val)); assertTrue(set.contains(val)); set.clear(); assertFalse(set.contains(val)); }
/**
 * Opens an upload stream for the given file: a multipart stream is wrapped in a
 * memory-segmenting buffer sized to {@code partsize}, and the returned
 * {@code HttpResponseOutputStream} exposes the multipart response entity via
 * {@code getStatus()} once the upload completes.
 */
@Override public HttpResponseOutputStream<FileEntity> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final MultipartOutputStream proxy = new MultipartOutputStream(file, status); return new HttpResponseOutputStream<FileEntity>(new MemorySegementingOutputStream(proxy, partsize), new BrickAttributesFinderFeature(session), status) { @Override public FileEntity getStatus() { return proxy.getResponse(); } }; }
/**
 * Uploads 56 random bytes (much smaller than the 5 MiB part size) through the
 * multipart write feature with unknown length (-1), then verifies: the byte count
 * sent, that the file exists remotely, that the reported attributes match the
 * response status and content length, and that reading the file back yields the
 * original bytes. Cleans up by deleting the file.
 */
@Test public void testWriteSmallPart() throws Exception { final BrickMultipartWriteFeature feature = new BrickMultipartWriteFeature(session, 5 * 1024 * 1024); final Path container = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)); final TransferStatus status = new TransferStatus(); status.setLength(-1L); final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final HttpResponseOutputStream<FileEntity> out = feature.write(file, status, new DisabledConnectionCallback()); final byte[] content = RandomUtils.nextBytes(56); final ByteArrayInputStream in = new ByteArrayInputStream(content); final TransferStatus progress = new TransferStatus(); final BytecountStreamListener count = new BytecountStreamListener(); new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out); assertEquals(content.length, count.getSent()); in.close(); out.close(); assertTrue(new BrickFindFeature(session).find(file)); final PathAttributes attributes = new BrickAttributesFinderFeature(session).find(file); assertEquals(new BrickAttributesFinderFeature(session).toAttributes(out.getStatus()), attributes); assertEquals(content.length, attributes.getSize()); final byte[] compare = new byte[content.length]; final InputStream stream = new BrickReadFeature(session).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); new BrickDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/** UDF: generates a random (version 4) UUID and returns its canonical string form. */
@Udf
public String uuid() {
    final java.util.UUID generated = java.util.UUID.randomUUID();
    return generated.toString();
}
/**
 * Round-trips a generated UUID through its 16-byte binary form: parses the string
 * from {@code udf.uuid()}, serializes its most/least significant bits into a
 * ByteBuffer, and checks that the byte-based {@code uuid(ByteBuffer)} overload
 * reproduces the same canonical string (aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee).
 */
@Test public void shouldReturnCorrectOutputFormat() { // aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee final String anUuid = udf.uuid(); final java.util.UUID uuid = java.util.UUID.fromString(anUuid); final ByteBuffer bytes = ByteBuffer.wrap(new byte[16]); bytes.putLong(uuid.getMostSignificantBits()); bytes.putLong(uuid.getLeastSignificantBits()); byte[] byteArrays = bytes.array(); final String toUuid = udf.uuid(ByteBuffer.wrap(byteArrays)); assertThat(toUuid, is(anUuid)); }
/**
 * Looks up {@code key} across all underlying stores for this store name, returning
 * the first non-null value found, or {@code null} when no store holds the key.
 *
 * @param key the key to look up; must not be null
 * @return the first non-null value found, or null
 * @throws InvalidStateStoreException if any underlying store has been closed or
 *         migrated (e.g. during a rebalance); callers should re-discover the store
 */
@Override
public V get(final K key) {
    Objects.requireNonNull(key);
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    for (final ReadOnlyKeyValueStore<K, V> store : stores) {
        try {
            final V result = store.get(key);
            if (result != null) {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // Preserve the original exception as the cause instead of discarding it,
            // so the underlying store failure remains diagnosable from the stack trace.
            throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.", e);
        }
    }
    return null;
}
/**
 * While the Streams instance is rebalancing, a store lookup must surface
 * InvalidStateStoreException rather than returning a value.
 */
@Test public void shouldThrowInvalidStoreExceptionDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().get("anything")); }
/**
 * Returns the registered worker currently executing on the given thread,
 * or empty when no registered worker runs on that thread.
 */
@Override
public Optional<CeWorker> getCeWorkerIn(Thread thread) {
    for (CeWorker worker : workerStatuses.keySet()) {
        if (worker.isExecutedBy(thread)) {
            return Optional.of(worker);
        }
    }
    return Optional.empty();
}
/**
 * A worker that was never registered with the controller must not be found,
 * regardless of which thread it is mocked to run on: the lookup stays empty for
 * both the current thread and an unrelated thread in all three mock setups.
 */
@Test public void getCeWorkerIn_returns_empty_if_worker_is_unregistered_in_CeWorkerController() { CeWorker ceWorker = mock(CeWorker.class); Thread currentThread = Thread.currentThread(); Thread otherThread = new Thread(); mockWorkerIsRunningOnNoThread(ceWorker); assertThat(underTest.getCeWorkerIn(currentThread)).isEmpty(); assertThat(underTest.getCeWorkerIn(otherThread)).isEmpty(); mockWorkerIsRunningOnThread(ceWorker, currentThread); assertThat(underTest.getCeWorkerIn(currentThread)).isEmpty(); assertThat(underTest.getCeWorkerIn(otherThread)).isEmpty(); mockWorkerIsRunningOnThread(ceWorker, otherThread); assertThat(underTest.getCeWorkerIn(currentThread)).isEmpty(); assertThat(underTest.getCeWorkerIn(otherThread)).isEmpty(); }
/**
 * Renames this repository directory. A case-insensitive match with the current
 * name is a no-op. When the repository supports the extended API, the rename is
 * delegated with the renameHomeDirectories flag; otherwise the basic rename is
 * used. Afterwards the cached object info is re-fetched so the new name shows
 * in the UI, and the view is refreshed.
 */
public void setName( String name, boolean renameHomeDirectories ) throws Exception { if ( getDirectory().getName().equalsIgnoreCase( name ) ) { return; } if ( rep instanceof RepositoryExtended ) { ( (RepositoryExtended) rep ).renameRepositoryDirectory( getDirectory().getObjectId(), null, name, renameHomeDirectories ); } else { rep.renameRepositoryDirectory( getDirectory().getObjectId(), null, name ); } // Update the object reference so the new name is displayed obj = rep.getObjectInformation( getObjectId(), getRepositoryElementType() ); refresh(); }
/**
 * Verifies dispatch of setName: a plain repository gets the 3-arg rename, while
 * an extended (PUR) repository gets the 4-arg rename including the
 * renameHomeDirectories flag.
 */
@Test public void testSetName() throws Exception { final String newDirName = "foo"; when( mockRepositoryDirectory.getName() ).thenReturn( "dirName" ); uiRepDir.setName( newDirName, true ); verify( mockRepository ).renameRepositoryDirectory( mockRepositoryDirectory.getObjectId(), null, newDirName ); uiPurRepDir.setName( newDirName, true ); verify( mockPurRepository ).renameRepositoryDirectory( mockRepositoryDirectory.getObjectId(), null, newDirName, true ); }
/**
 * Appends a header value under the given name. The name is normalized (headers
 * are case-insensitive) before storage; both name and value must be non-null.
 */
public void add(String headerName, String value) {
    final String name = Objects.requireNonNull(headerName, "headerName");
    final String normalName = HeaderName.normalize(name);
    Objects.requireNonNull(value, "value");
    addNormal(name, normalName, value);
}
/**
 * Adds two "Via" values (one using lowercase "via") interleaved with "Cookie"
 * entries, and checks that getAll("Via") returns both values in insertion order —
 * demonstrating case-insensitive header-name matching.
 */
@Test void add() { Headers headers = new Headers(); headers.add("Via", "duct"); headers.add("Cookie", "this=that"); headers.add("Cookie", "frizzle=frazzle"); headers.add("via", "con Dios"); Truth.assertThat(headers.getAll("Via")) .containsExactly("duct", "con Dios") .inOrder(); }
/**
 * Prepares the Consul cluster module: parses the configured host:port list, builds
 * a Consul client (3s connect timeout so a bad endpoint cannot block forever; ACL
 * token applied when configured; multi-host mode with 5s failover window when more
 * than one address is given), then registers the coordinator as the
 * ClusterRegister, ClusterNodesQuery and ClusterCoordinator services.
 * Parse or Consul errors are rethrown as ModuleStartException with the cause kept.
 */
@Override public void prepare() throws ServiceNotProvidedException, ModuleStartException { try { List<Address> addressList = ConnectUtils.parse(config.getHostPort()); List<HostAndPort> hostAndPorts = new ArrayList<>(); for (Address address : addressList) { hostAndPorts.add(HostAndPort.fromParts(address.getHost(), address.getPort())); } Consul.Builder consulBuilder = Consul.builder() // we should set this value or it will be blocked forever .withConnectTimeoutMillis(3000); if (StringUtils.isNotEmpty(config.getAclToken())) { consulBuilder.withAclToken(config.getAclToken()); } if (hostAndPorts.size() > 1) { client = consulBuilder.withMultipleHostAndPort(hostAndPorts, 5000).build(); } else { client = consulBuilder.withHostAndPort(hostAndPorts.get(0)).build(); } } catch (ConnectStringParseException | ConsulException e) { throw new ModuleStartException(e.getMessage(), e); } ConsulCoordinator coordinator = new ConsulCoordinator(getManager(), config, client); this.registerServiceImplementation(ClusterRegister.class, coordinator); this.registerServiceImplementation(ClusterNodesQuery.class, coordinator); this.registerServiceImplementation(ClusterCoordinator.class, coordinator); }
/**
 * With two configured addresses, prepare() must use the multi-host Consul builder:
 * the static Consul.builder() is mocked, prepare() is invoked, and the captured
 * withMultipleHostAndPort argument is asserted to contain exactly the two
 * configured host/port pairs.
 */
@Test @SuppressWarnings("unchecked") public void prepare() throws Exception { ClusterModuleConsulConfig consulConfig = new ClusterModuleConsulConfig(); consulConfig.setHostPort("10.0.0.1:1000,10.0.0.2:1001"); Whitebox.setInternalState(provider, "config", consulConfig); Consul consulClient = mock(Consul.class); Consul.Builder builder = mock(Consul.Builder.class); when(builder.build()).thenReturn(consulClient); try (MockedStatic<Consul> ignored = mockStatic(Consul.class)) { when(Consul.builder()).thenReturn(builder); when(builder.withConnectTimeoutMillis(anyLong())).thenReturn(builder); when(builder.withMultipleHostAndPort(anyCollection(), anyLong())).thenReturn(builder); provider.prepare(); ArgumentCaptor<Collection> addressCaptor = ArgumentCaptor.forClass(Collection.class); ArgumentCaptor<Long> timeCaptor = ArgumentCaptor.forClass(long.class); verify(builder).withMultipleHostAndPort(addressCaptor.capture(), timeCaptor.capture()); List<HostAndPort> address = (List<HostAndPort>) addressCaptor.getValue(); assertEquals(2, address.size()); assertEquals( Lists.newArrayList(HostAndPort.fromParts("10.0.0.1", 1000), HostAndPort.fromParts("10.0.0.2", 1001)), address ); } }
/**
 * Builds a human-readable log message containing the metric deltas accumulated
 * since the previous call: takes a fresh snapshot of this container, diffs it
 * against the last snapshot, renders the delta restricted to the allowed metric
 * URNs, and appends the last-reported timestamp. The fresh snapshot becomes the
 * new baseline for the next call.
 */
public String generateLogMessage(
    String header, Set<String> allowedMetricUrns, long lastReported) {
    // Snapshot the current metric values.
    MetricsContainerImpl currentSnapshot = new MetricsContainerImpl(this.stepName);
    currentSnapshot.update(this);
    // Diff against the snapshot taken when a message was last generated.
    MetricsContainerImpl delta =
        MetricsContainerImpl.deltaContainer(lastMetricsSnapshot, currentSnapshot);
    String message =
        header
            + delta.getCumulativeString(allowedMetricUrns)
            + String.format("(last reported at %s)%n", new Date(lastReported));
    // The fresh snapshot is the baseline for the next delta.
    lastMetricsSnapshot = currentSnapshot;
    return message;
}
/**
 * Checks that successive generateLogMessage calls report deltas, not cumulative
 * totals: after each batch of counter increments and histogram updates, the
 * rendered message must show only the counts/percentiles accumulated since the
 * previous message (counter 2 then 3 then implied 4; histogram delta counts
 * 2, 9, 7 with the corresponding p50/p90/p99 values).
 */
@Test public void testGeneratedLogMessageShowsDeltas() { MetricName cName = MonitoringInfoMetricName.named( MonitoringInfoConstants.Urns.ELEMENT_COUNT, Collections.singletonMap("name", "counter")); HistogramData.BucketType bucketType = HistogramData.LinearBuckets.of(0, 2, 5); MetricName hName = MonitoringInfoMetricName.named( MonitoringInfoConstants.Urns.ELEMENT_COUNT, Collections.singletonMap("name", "histogram")); MetricsLogger logger = new MetricsLogger(null); logger.getCounter(cName).inc(2L); // Set buckets counts to: [0,1,1,,0,0,...] logger.getHistogram(hName, bucketType).update(1); logger.getHistogram(hName, bucketType).update(3); Set<String> allowedMetricUrns = new HashSet<String>(); allowedMetricUrns.add(MonitoringInfoConstants.Urns.ELEMENT_COUNT); String msg = logger.generateLogMessage("My Headder", allowedMetricUrns, 0); assertThat(msg, CoreMatchers.containsString("beam:metric:element_count:v1 {name=counter} = 2")); assertThat( msg, CoreMatchers.containsString( "{name=histogram} = {count: 2, p50: 2.000000, p90: 3.600000, p99: 3.960000}")); logger.getCounter(cName).inc(3L); // Set buckets counts to: [0,5,6,0,0,0] // Which means a delta of: [0,4,5,0,0,0] for (int i = 0; i < 4; i++) { logger.getHistogram(hName, bucketType).update(1); } for (int i = 0; i < 5; i++) { logger.getHistogram(hName, bucketType).update(3); } msg = logger.generateLogMessage("My Header: ", allowedMetricUrns, 0); assertThat(msg, CoreMatchers.containsString("beam:metric:element_count:v1 {name=counter} = 3")); assertThat( msg, CoreMatchers.containsString( "{name=histogram} = {count: 9, p50: 2.200000, p90: 3.640000, p99: 3.964000}")); logger.getCounter(cName).inc(4L); // Set buckets counts to: [0,8,10,0,0,0] // Which means a delta of: [0,3,4,0,0,0] for (int i = 0; i < 3; i++) { logger.getHistogram(hName, bucketType).update(1); } for (int i = 0; i < 4; i++) { logger.getHistogram(hName, bucketType).update(3); } msg = logger.generateLogMessage("My Header: ", allowedMetricUrns, 0); assertThat( 
msg, CoreMatchers.containsString( "{name=histogram} = {count: 7, p50: 2.250000, p90: 3.650000, p99: 3.965000}")); }
/**
 * Resolves a resource string to a URL. A "classpath:"-prefixed resource is looked
 * up via this class's class loader (falling back to the system class loader) and
 * a FileNotFoundException is thrown when absent. Anything else is parsed as a URL,
 * falling back to interpreting it as a file-system path.
 */
public static URL getResourceUrl(String resource) throws IOException {
    if (!resource.startsWith(CLASSPATH_PREFIX)) {
        // Not a classpath resource: try a plain URL first, then a file path.
        try {
            return new URL(resource);
        } catch (MalformedURLException ex) {
            return new File(resource).toURI().toURL();
        }
    }
    String path = resource.substring(CLASSPATH_PREFIX.length());
    ClassLoader classLoader = ResourceUtils.class.getClassLoader();
    URL url = classLoader != null
        ? classLoader.getResource(path)
        : ClassLoader.getSystemResource(path);
    if (url == null) {
        throw new FileNotFoundException("Resource [" + resource + "] does not exist");
    }
    return url;
}
/** A classpath resource that does not exist must raise FileNotFoundException. */
@Test void testGetResourceUrlForClasspathNotExists() throws IOException { assertThrows(FileNotFoundException.class, () -> { ResourceUtils.getResourceUrl("classpath:non-exist.pem"); }); }
/** Static factory: returns a fresh {@code Builder} for constructing instances. */
public static Builder builder() { return new Builder(); }
/**
 * JSON round-trip coverage for CreateNamespaceResponse: a response with
 * properties, a response with explicitly-empty properties, a builder-default
 * (implicitly empty) properties response, and an empty namespace — each must
 * serialize to the expected JSON and deserialize back to an equal object.
 */
@Test // Test cases that are JSON that can be created via the Builder public void testRoundTripSerDe() throws JsonProcessingException { String fullJson = "{\"namespace\":[\"accounting\",\"tax\"],\"properties\":{\"owner\":\"Hank\"}}"; CreateNamespaceResponse req = CreateNamespaceResponse.builder() .withNamespace(NAMESPACE) .setProperties(PROPERTIES) .build(); assertRoundTripSerializesEquallyFrom(fullJson, req); String jsonEmptyProperties = "{\"namespace\":[\"accounting\",\"tax\"],\"properties\":{}}"; CreateNamespaceResponse responseWithExplicitEmptyProperties = CreateNamespaceResponse.builder() .withNamespace(NAMESPACE) .setProperties(EMPTY_PROPERTIES) .build(); assertRoundTripSerializesEquallyFrom(jsonEmptyProperties, responseWithExplicitEmptyProperties); CreateNamespaceResponse responseWithImplicitEmptyProperties = CreateNamespaceResponse.builder().withNamespace(NAMESPACE).build(); assertRoundTripSerializesEquallyFrom(jsonEmptyProperties, responseWithImplicitEmptyProperties); String jsonEmptyNamespace = "{\"namespace\":[],\"properties\":{}}"; CreateNamespaceResponse responseWithEmptyNamespace = CreateNamespaceResponse.builder().withNamespace(Namespace.empty()).build(); assertRoundTripSerializesEquallyFrom(jsonEmptyNamespace, responseWithEmptyNamespace); }
/**
 * Exception hook: clears both thread-local holders (request data and request tag)
 * so nothing leaks to the next request handled on this thread, then returns the
 * context unchanged.
 */
@Override
public ExecuteContext onThrow(ExecuteContext context) {
    // The two removals are independent; both must always run.
    ThreadLocalUtils.removeRequestTag();
    ThreadLocalUtils.removeRequestData();
    return context;
}
/**
 * Populates both thread-locals, invokes onThrow, and asserts both the request
 * tag and request data have been cleared.
 */
@Test public void testOnThrow() { ThreadLocalUtils.setRequestTag(new RequestTag(null)); ThreadLocalUtils.setRequestData(new RequestData(null, null, null)); interceptor.onThrow(context); Assert.assertNull(ThreadLocalUtils.getRequestTag()); Assert.assertNull(ThreadLocalUtils.getRequestData()); }
/**
 * This set is empty, so it contains all elements of {@code set} only when
 * {@code set} itself is empty (vacuous truth).
 */
@Override public boolean containsAll(IntSet set) { return set.isEmpty(); }
/**
 * Checks containsAll symmetry against a java.util.Set: the empty set never
 * contains a non-empty set's elements, any set contains all elements of the
 * empty set, and two empty sets contain each other.
 */
@Test public void testContainsAll1() throws Exception { Set<Integer> hashSet = new HashSet<>(); hashSet.add(3); assertFalse(es.containsAll(hashSet)); assertTrue(hashSet.containsAll(es)); hashSet.remove(3); assertTrue(hashSet.containsAll(es)); assertTrue(es.containsAll(hashSet)); }
/**
 * Fetches the content at the "url" request parameter on behalf of the agent.
 * Returns a FAILURE result when the parameter is missing/empty, delegates HTTP
 * and unexpected errors to {@code handleWebFetchError}, and otherwise returns a
 * SUCCESS result carrying the formatted page content.
 *
 * @param agent   the requesting agent (used for logging and error handling)
 * @param request action parameters; must contain a non-empty "url" entry
 * @return the action result (SUCCESS with content, or FAILURE with an error message)
 */
public ActionResult apply(Agent agent, Map<String, String> request) {
    // Read the parameter once and reuse it for both logging and fetching.
    String url = request.get("url");
    log.debug("Fetching url {} for agent {}", url, agent.getId());
    if (url == null || url.isEmpty()) {
        return ActionResult.builder()
            .status(ActionResult.Status.FAILURE)
            .summary("The url parameter is missing or has an empty value.")
            .error("The url parameter is missing or has an empty value.")
            .build();
    }
    String content;
    try {
        content = webClient.get()
            .uri(url)
            .retrieve()
            .bodyToMono(String.class)
            .block();
    } catch (WebClientResponseException e) {
        return handleWebFetchError(agent, url,
            String.format("Could not fetch url, error with http status code %d", e.getStatusCode().value()));
    } catch (Exception e) {
        // Plain string literal: the previous String.format call had no format specifiers.
        return handleWebFetchError(agent, url, "Could not fetch url, unexpected error.");
    }
    return ActionResult
        .builder()
        .summary(String.format("Reading content at %s", url))
        .status(ActionResult.Status.SUCCESS)
        .result(formatResult(url, content))
        .build();
}
/**
 * With no "url" entry in the request map, apply must fail fast: status FAILURE
 * and the missing-parameter message in both summary and error.
 */
@Test void testApplyWithMissingUrl() { Map<String, String> request = new HashMap<>(); ActionResult result = rawBrowserAction.apply(agent, request); assertEquals(ActionResult.Status.FAILURE, result.getStatus()); assertEquals("The url parameter is missing or has an empty value.", result.getSummary()); assertEquals("The url parameter is missing or has an empty value.", result.getError()); }
/**
 * Verifies a captcha token against the hCaptcha siteverify endpoint and converts
 * the response into an AssessmentResult.
 *
 * Flow visible in the body:
 *  - POSTs a form-encoded body (token, API secret, client IP) to
 *    https://hcaptcha.com/siteverify;
 *  - wraps transport failures (CompletionException from the async client) and
 *    non-200 statuses in IOException;
 *  - on an unsuccessful verification, increments a counter per error code and
 *    returns an invalid assessment;
 *  - inverts the hCaptcha score (hCaptcha: low = less risky) so it matches the
 *    recaptcha-style convention, rejecting scores outside [0, 1];
 *  - compares the inverted score against the per-action (or default) score floor
 *    from dynamic configuration and records a metric per score reason.
 */
@Override public AssessmentResult verify( final String siteKey, final Action action, final String token, final String ip) throws IOException { final DynamicCaptchaConfiguration config = dynamicConfigurationManager.getConfiguration().getCaptchaConfiguration(); final String body = String.format("response=%s&secret=%s&remoteip=%s", URLEncoder.encode(token, StandardCharsets.UTF_8), URLEncoder.encode(this.apiKey, StandardCharsets.UTF_8), ip); final HttpRequest request = HttpRequest.newBuilder() .uri(URI.create("https://hcaptcha.com/siteverify")) .header("Content-Type", "application/x-www-form-urlencoded") .POST(HttpRequest.BodyPublishers.ofString(body)) .build(); final HttpResponse<String> response; try { response = this.client.sendAsync(request, HttpResponse.BodyHandlers.ofString()).join(); } catch (CompletionException e) { logger.warn("failed to make http request to hCaptcha: {}", e.getMessage()); throw new IOException(ExceptionUtils.unwrap(e)); } if (response.statusCode() != Response.Status.OK.getStatusCode()) { logger.warn("failure submitting token to hCaptcha (code={}): {}", response.statusCode(), response); throw new IOException("hCaptcha http failure : " + response.statusCode()); } final HCaptchaResponse hCaptchaResponse = SystemMapper.jsonMapper() .readValue(response.body(), HCaptchaResponse.class); logger.debug("received hCaptcha response: {}", hCaptchaResponse); if (!hCaptchaResponse.success) { for (String errorCode : hCaptchaResponse.errorCodes) { Metrics.counter(INVALID_REASON_COUNTER_NAME, "action", action.getActionName(), "reason", errorCode).increment(); } return AssessmentResult.invalid(); } // hcaptcha uses the inverse scheme of recaptcha (for hcaptcha, a low score is less risky) final float score = 1.0f - hCaptchaResponse.score; if (score < 0.0f || score > 1.0f) { logger.error("Invalid score {} from hcaptcha response {}", hCaptchaResponse.score, hCaptchaResponse); return AssessmentResult.invalid(); } final BigDecimal threshold = 
config.getScoreFloorByAction().getOrDefault(action, config.getScoreFloor()); final AssessmentResult assessmentResult = AssessmentResult.fromScore(score, threshold.floatValue()); for (String reason : hCaptchaResponse.scoreReasons) { Metrics.counter(ASSESSMENT_REASON_COUNTER_NAME, "action", action.getActionName(), "reason", reason, "score", assessmentResult.getScoreString()).increment(); } return assessmentResult; }
/**
 * A 200 response whose JSON body is truncated/malformed must surface as an
 * IOException from verify (Jackson parse failure), not as a silent result.
 */
@Test public void badBody() throws IOException, InterruptedException { final FaultTolerantHttpClient httpClient = mockResponder(200, """ {"success" : true, """); final HCaptchaClient client = new HCaptchaClient("fake", httpClient, mockConfig(true, 0.5)); assertThrows(IOException.class, () -> client.verify(SITE_KEY, Action.CHALLENGE, TOKEN, null)); }
/** Static factory: starts a builder for composing multiple CombineFns into one. */
public static ComposeCombineFnBuilder compose() { return new ComposeCombineFnBuilder(); }
/**
 * ValidatesRunner test: a composed per-key combine where one component
 * (OutputNullString) emits null must propagate correctly through nullable
 * coders — expects per-key results pairing the max integer with a null string.
 */
@Test @Category({ValidatesRunner.class}) public void testComposedCombineNullValues() { p.getCoderRegistry() .registerCoderForClass(UserString.class, NullableCoder.of(UserStringCoder.of())); p.getCoderRegistry() .registerCoderForClass(String.class, NullableCoder.of(StringUtf8Coder.of())); PCollection<KV<String, KV<Integer, UserString>>> perKeyInput = p.apply( Create.timestamped( Arrays.asList( KV.of("a", KV.of(1, UserString.of("1"))), KV.of("a", KV.of(1, UserString.of("1"))), KV.of("a", KV.of(4, UserString.of("4"))), KV.of("b", KV.of(1, UserString.of("1"))), KV.of("b", KV.of(13, UserString.of("13")))), Arrays.asList(0L, 4L, 7L, 10L, 16L)) .withCoder( KvCoder.of( NullableCoder.of(StringUtf8Coder.of()), KvCoder.of( BigEndianIntegerCoder.of(), NullableCoder.of(UserStringCoder.of()))))); TupleTag<Integer> maxIntTag = new TupleTag<>(); TupleTag<UserString> concatStringTag = new TupleTag<>(); PCollection<KV<String, KV<Integer, String>>> combinePerKey = perKeyInput .apply( Combine.perKey( CombineFns.compose() .with(new GetIntegerFunction(), Max.ofIntegers(), maxIntTag) .with( new GetUserStringFunction(), new OutputNullString(), concatStringTag))) .apply( "ExtractPerKeyResult", ParDo.of(new ExtractResultDoFn(maxIntTag, concatStringTag))); PAssert.that(combinePerKey) .containsInAnyOrder( KV.of("a", KV.of(4, (String) null)), KV.of("b", KV.of(13, (String) null))); p.run(); }
/**
 * Returns a matcher that matches requests whose channel name equals the given
 * value exactly. Both a null and an empty channel name are rejected with a
 * NullPointerException (kept as NPE for compatibility with existing callers).
 */
public static <Req extends MessagingRequest> Matcher<Req> channelNameEquals(String channelName) {
    if (channelName == null) {
        throw new NullPointerException("channelName == null");
    }
    if (channelName.isEmpty()) {
        throw new NullPointerException("channelName is empty");
    }
    return new MessagingChannelNameEquals<>(channelName);
}
/** A request on channel "complaints" must not match a matcher built for "alerts". */
@Test void channelNameEquals_unmatched() { when(request.channelName()).thenReturn("complaints"); assertThat(channelNameEquals("alerts").matches(request)).isFalse(); }
/**
 * Deep analysis of an OAL parse result: enriches the AnalysisResult with
 * everything needed to generate the metrics class.
 *
 * Steps visible in the body:
 *  1. resolves the Metrics class for the aggregation function and records its
 *     simple name;
 *  2. converts each parsed filter ConditionExpression into a code-gen Expression
 *     (matcher class, getter on the source — isXxx for boolean matchers,
 *     getXxx otherwise — optional cast, literal value);
 *  3. walks the metrics class hierarchy for the method annotated with @Entrance
 *     (IllegalArgumentException when absent);
 *  4. builds the entry-method arguments from the entrance method's parameter
 *     annotations: @SourceFrom → source getter (with cast), @ConstOne → literal 1,
 *     @Expression → next function-condition expression (must exist), @Arg → next
 *     literal function argument; any other/missing annotation is rejected;
 *  5. collects every @Column-annotated field up the class hierarchy as a
 *     persistent field;
 *  6. attaches the default source columns for the source name and generates the
 *     serialize fields.
 */
public AnalysisResult analysis(AnalysisResult result) { // 1. Set sub package name by source.metrics Class<? extends Metrics> metricsClass = MetricsHolder.find(result.getAggregationFuncStmt().getAggregationFunctionName()); String metricsClassSimpleName = metricsClass.getSimpleName(); result.setMetricsClassName(metricsClassSimpleName); // Optional for filter List<ConditionExpression> expressions = result.getFilters().getFilterExpressionsParserResult(); if (expressions != null && expressions.size() > 0) { for (ConditionExpression expression : expressions) { final FilterMatchers.MatcherInfo matcherInfo = FilterMatchers.INSTANCE.find( expression.getExpressionType()); final String getter = matcherInfo.isBooleanType() ? ClassMethodUtil.toIsMethod(expression.getAttributes()) : ClassMethodUtil.toGetMethod(expression.getAttributes()); final Expression filterExpression = new Expression(); filterExpression.setExpressionObject(matcherInfo.getMatcher().getName()); filterExpression.setLeft(TypeCastUtil.withCast(expression.getCastType(), "source." + getter)); filterExpression.setRight(expression.getValue()); result.getFilters().addFilterExpressions(filterExpression); } } // 3. Find Entrance method of this metrics Class<?> c = metricsClass; Method entranceMethod = null; SearchEntrance: while (!c.equals(Object.class)) { for (Method method : c.getMethods()) { Entrance annotation = method.getAnnotation(Entrance.class); if (annotation != null) { entranceMethod = method; break SearchEntrance; } } c = c.getSuperclass(); } if (entranceMethod == null) { throw new IllegalArgumentException("Can't find Entrance method in class: " + metricsClass.getName()); } EntryMethod entryMethod = new EntryMethod(); result.setEntryMethod(entryMethod); entryMethod.setMethodName(entranceMethod.getName()); // 4. Use parameter's annotation of entrance method to generate aggregation entrance. 
for (Parameter parameter : entranceMethod.getParameters()) { Class<?> parameterType = parameter.getType(); Annotation[] parameterAnnotations = parameter.getAnnotations(); if (parameterAnnotations == null || parameterAnnotations.length == 0) { throw new IllegalArgumentException( "Entrance method:" + entranceMethod + " doesn't include the annotation."); } Annotation annotation = parameterAnnotations[0]; if (annotation instanceof SourceFrom) { entryMethod.addArg( parameterType, TypeCastUtil.withCast( result.getFrom().getSourceCastType(), "source." + ClassMethodUtil.toGetMethod(result.getFrom().getSourceAttribute()) ) ); } else if (annotation instanceof ConstOne) { entryMethod.addArg(parameterType, "1"); } else if (annotation instanceof org.apache.skywalking.oap.server.core.analysis.metrics.annotation.Expression) { if (isNull(result.getAggregationFuncStmt().getFuncConditionExpressions()) || result.getAggregationFuncStmt().getFuncConditionExpressions().isEmpty()) { throw new IllegalArgumentException( "Entrance method:" + entranceMethod + " argument can't find funcParamExpression."); } else { ConditionExpression expression = result.getAggregationFuncStmt().getNextFuncConditionExpression(); final FilterMatchers.MatcherInfo matcherInfo = FilterMatchers.INSTANCE.find( expression.getExpressionType()); final String getter = matcherInfo.isBooleanType() ? ClassMethodUtil.toIsMethod(expression.getAttributes()) : ClassMethodUtil.toGetMethod(expression.getAttributes()); final Expression argExpression = new Expression(); argExpression.setRight(expression.getValue()); argExpression.setExpressionObject(matcherInfo.getMatcher().getName()); argExpression.setLeft(TypeCastUtil.withCast(expression.getCastType(), "source." 
+ getter)); entryMethod.addArg(argExpression); } } else if (annotation instanceof Arg) { entryMethod.addArg(parameterType, result.getAggregationFuncStmt().getNextFuncArg()); } else { throw new IllegalArgumentException( "Entrance method:" + entranceMethod + " doesn't the expected annotation."); } } // 5. Get all column declared in MetricsHolder class. c = metricsClass; while (!c.equals(Object.class)) { for (Field field : c.getDeclaredFields()) { Column column = field.getAnnotation(Column.class); if (column != null) { result.addPersistentField( field.getName(), column.name(), field.getType()); } } c = c.getSuperclass(); } // 6. Based on Source, generate default columns List<SourceColumn> columns = SourceColumnsFactory.getColumns(result.getFrom().getSourceName()); result.setFieldsFromSource(columns); result.generateSerializeFields(); return result; }
// End-to-end check of DeepAnalysis for a longAvg metric over the Endpoint source
// with a stringMatch filter on the "name" attribute.
@Test
public void testFilterAnalysis() {
    AnalysisResult result = new AnalysisResult();
    result.getFrom().setSourceName("Endpoint");
    result.getFrom().getSourceAttribute().add("latency");
    result.setMetricsName("EndpointAvg");
    result.getAggregationFuncStmt().setAggregationFunctionName("longAvg");
    ConditionExpression expression = new ConditionExpression();
    expression.setExpressionType("stringMatch");
    expression.getAttributes().add("name");
    expression.setValue("\"/service/prod/save\"");
    result.getFilters().addFilterExpressionsParserResult(expression);
    DeepAnalysis analysis = new DeepAnalysis();
    result = analysis.analysis(result);
    // Entry method: longAvg is translated to combine(value, count) with long casts.
    EntryMethod method = result.getEntryMethod();
    Assertions.assertEquals("combine", method.getMethodName());
    Assertions.assertEquals("(long)(source.getLatency())", method.getArgsExpressions().get(0));
    Assertions.assertEquals("(long)(1)", method.getArgsExpressions().get(1));
    List<SourceColumn> source = result.getFieldsFromSource();
    Assertions.assertEquals(2, source.size());
    List<DataColumn> persistentFields = result.getPersistentFields();
    Assertions.assertEquals(4, persistentFields.size());
    // Filter: compiled into a StringMatch expression over source.getName().
    List<Expression> filterExpressions = result.getFilters().getFilterExpressions();
    Assertions.assertEquals(1, filterExpressions.size());
    Expression filterExpression = filterExpressions.get(0);
    Assertions.assertEquals(StringMatch.class.getName(), filterExpression.getExpressionObject());
    Assertions.assertEquals("source.getName()", filterExpression.getLeft());
    Assertions.assertEquals("\"/service/prod/save\"", filterExpression.getRight());
}
/**
 * Decides whether this provider can deserialize the given type.
 * Only types accepted by {@code isProvidable} are delegated to the
 * superclass (Jackson) readability check.
 */
@Override
public boolean isReadable(Class<?> type,
                          @Nullable Type genericType,
                          @Nullable Annotation[] annotations,
                          @Nullable MediaType mediaType) {
    // Reject types this provider does not handle before consulting Jackson.
    if (!isProvidable(type)) {
        return false;
    }
    return super.isReadable(type, genericType, annotations, mediaType);
}
// The provider must accept deserializable types even when genericType,
// annotations, and mediaType are all absent (null).
@Test
void readsDeserializableTypes() {
    assertThat(provider.isReadable(Example.class, null, null, null))
        .isTrue();
}
public static Map<String, String[]> getQueryMap(String query) { Map<String, String[]> map = new HashMap<>(); String[] params = query.split(PARAM_CONCATENATE); for (String param : params) { String[] paramSplit = param.split("="); if (paramSplit.length == 0) { continue; // We found no key-/value-pair, so continue on the next param } String name = decodeQuery(paramSplit[0]); // hack for SOAP request (generally) if (name.trim().startsWith("<?")) { // $NON-NLS-1$ map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$ return map; } // the post payload is not key=value if((param.startsWith("=") && paramSplit.length == 1) || paramSplit.length > 2) { map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$ return map; } String value = ""; if(paramSplit.length>1) { value = decodeQuery(paramSplit[1]); } String[] known = map.get(name); if(known == null) { known = new String[] {value}; } else { String[] tmp = new String[known.length+1]; tmp[tmp.length-1] = value; System.arraycopy(known, 0, tmp, 0, known.length); known = tmp; } map.put(name, known); } return map; }
// A URL-encoded ampersand (%26) inside a value must not split the parameter.
@Test
void testGetQueryMapAmpInValue() {
    String query = "param2=15&param1=12&param3=baul%26Pismuth";
    Map<String, String[]> params = RequestViewHTTP.getQueryMap(query);
    Assertions.assertNotNull(params);
    Assertions.assertEquals(3, params.size());
    String[] param1 = params.get("param1");
    Assertions.assertNotNull(param1);
    Assertions.assertEquals(1, param1.length);
    Assertions.assertEquals("12", param1[0]);
    String[] param2 = params.get("param2");
    Assertions.assertNotNull(param2);
    Assertions.assertEquals(1, param2.length);
    Assertions.assertEquals("15", param2[0]);
    String[] param3 = params.get("param3");
    Assertions.assertNotNull(param3);
    Assertions.assertEquals(1, param3.length);
    // %26 decodes to '&'.
    Assertions.assertEquals("baul&Pismuth", param3[0]);
}
/**
 * Executes the given Cypher text. In multi-statement mode the text is split on
 * ';' (heuristically avoiding quoted/backticked content); every statement is
 * run, and only the last statement's result is returned. Blank input is a
 * successful no-op.
 */
@Override
public InterpreterResult interpret(String cypherQuery, InterpreterContext interpreterContext) {
    LOGGER.info("Opening session");
    if (StringUtils.isBlank(cypherQuery)) {
        return new InterpreterResult(Code.SUCCESS);
    }
    final List<String> queries = isMultiStatementEnabled
            ? Arrays.asList(cypherQuery.split(";[^'|^\"|^(\\w+`)]"))
            : Arrays.asList(cypherQuery);
    // Run all leading statements for their side effects; surface only the last
    // result. With a single statement the loop body never executes.
    final int lastIndex = queries.size() - 1;
    for (String query : queries.subList(0, lastIndex)) {
        runQuery(query, interpreterContext);
    }
    return runQuery(queries.get(lastIndex), interpreterContext);
}
// Cartesian (srid 7203/9157) and WGS-84 geographic (srid 4326/4979) points in
// 2D and 3D, plus property access on a 3D geographic point.
@Test
void testPoint() {
    InterpreterResult result = interpreter.interpret("RETURN point({ x:3, y:0 }) AS cartesian_2d,"
            + "point({ x:0, y:4, z:1 }) AS cartesian_3d,"
            + "point({ latitude: 12, longitude: 56 }) AS geo_2d,"
            + "point({ latitude: 12, longitude: 56, height: 1000 }) AS geo_3d", context);
    assertEquals(Code.SUCCESS, result.code());
    // Note: geographic points render x=longitude, y=latitude.
    assertEquals("cartesian_2d\tcartesian_3d\tgeo_2d\tgeo_3d\n"
            + "Point{srid=7203, x=3.0, y=0.0}\tPoint{srid=9157, x=0.0, y=4.0, z=1.0}\t"
            + "Point{srid=4326, x=56.0, y=12.0}\tPoint{srid=4979, x=56.0, y=12.0, z=1000.0}\n",
            result.toString().replace(TABLE_RESULT_PREFIX, StringUtils.EMPTY));
    result = interpreter.interpret(
            "WITH point({ latitude: 12, longitude: 56, height: 1000 }) AS geo_3d "
            + "RETURN geo_3d.latitude AS latitude, geo_3d.longitude AS longitude, "
            + "geo_3d.height AS height", context);
    assertEquals(Code.SUCCESS, result.code());
    assertEquals("latitude\tlongitude\theight\n"
            + "12.0\t56.0\t1000.0\n",
            result.toString().replace(TABLE_RESULT_PREFIX, StringUtils.EMPTY));
}
/**
 * Returns the declared return types of this method.
 * NOTE(review): exposes the internal array directly (no defensive copy);
 * callers can mutate this object's state through it.
 */
@Override
public Type[] getReturnTypes() {
    return returnTypes;
}
// Both declared return types must be exposed, in declaration order.
@Test
void getReturnTypes() {
    Assertions.assertArrayEquals(new Type[] {String.class, String.class}, method.getReturnTypes());
}
/**
 * Validates an update of a sink configuration against the existing one and
 * returns the merged result (a clone of {@code existingConfig} with the
 * permitted overrides from {@code newConfig} applied).
 *
 * Immutable fields — tenant, namespace, name, subscription name, processing
 * guarantees, retain(-key) ordering, autoAck, and the set/regex-ness of input
 * topics — must not change.
 *
 * NOTE(review): this method also mutates {@code newConfig}: its inputSpecs map
 * is created and populated in place from inputs/topicsPattern/serde/schema maps.
 *
 * @throws IllegalArgumentException if any immutable field differs
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    // Identity fields must match exactly.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    // Subscription name may be supplied, but never changed to a different value.
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
            .equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Normalize all topic declarations (inputs list, topics pattern, serde map,
    // schema map) into newConfig.inputSpecs before comparing with the existing spec.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // New input specs may only refine topics that already exist; adding topics
    // or flipping their regex-ness is rejected.
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
// TestNG: updating retainKeyOrdering must be rejected as an immutable field.
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Retain Key Ordering cannot be altered")
public void testMergeDifferentRetainKeyOrdering() {
    SinkConfig sinkConfig = createSinkConfig();
    SinkConfig newSinkConfig = createUpdatedSinkConfig("retainKeyOrdering", true);
    SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
}
/**
 * Resolves a concurrent modification by letting the storage-provider copy win:
 * the local worker thread (if any) is interrupted so it stops processing the
 * stale version of the job.
 */
@Override
public ConcurrentJobModificationResolveResult resolve(Job localJob, Job storageProviderJob) {
    final Thread localWorker = jobSteward.getThreadProcessingJob(localJob);
    if (localWorker != null) {
        // Stop local processing; the remote state change takes precedence.
        localWorker.interrupt();
    }
    return ConcurrentJobModificationResolveResult.succeeded(storageProviderJob);
}
// A job also picked up by another server (orphaned → rescheduled → enqueued →
// processing elsewhere) must resolve to the storage-provider version while the
// local worker thread is interrupted.
@Test
void ifJobIsHavingConcurrentStateChangeOnDifferentServerItWillResolveToTheStorageProviderJob() {
    //GIVEN
    final Job jobInProgress = aJobInProgress().build();
    final Job jobInProgressOnOtherServer = aCopyOf(jobInProgress)
            .withState(new FailedState("Orphaned job", new IllegalStateException("Not important")))
            .withState(new ScheduledState(Instant.now()))
            .withState(new EnqueuedState())
            .withState(new ProcessingState(UUID.randomUUID(), "Not important"))
            .build();
    // WHEN
    final ConcurrentJobModificationResolveResult resolveResult = allowedStateChange.resolve(jobInProgress, jobInProgressOnOtherServer);
    // THEN
    assertThat(resolveResult.failed()).isFalse();
    assertThat(resolveResult.getLocalJob()).isEqualTo(jobInProgressOnOtherServer);
    verify(threadProcessingLocalJob).interrupt();
}
/**
 * Deletes a path, translating the chroot-relative path into the underlying
 * file system's namespace before delegating.
 */
@Override
public boolean delete(final Path f, final boolean recursive)
        throws IOException, UnresolvedLinkException {
    final Path resolved = fullPath(f);
    return myFs.delete(resolved, recursive);
}
// Creates and deletes files through the chrooted FileContext and verifies the
// chrooted view (fc) and the target view (fcTarget) agree at every step.
@Test
public void testCreateDelete() throws IOException {
    // Create file
    fileContextTestHelper.createFileNonRecursive(fc, "/foo");
    Assert.assertTrue(isFile(fc, new Path("/foo")));
    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo, "foo")));
    // Create file with recursive dir
    fileContextTestHelper.createFile(fc, "/newDir/foo");
    Assert.assertTrue(isFile(fc, new Path("/newDir/foo")));
    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/foo")));
    // Delete the created file
    Assert.assertTrue(fc.delete(new Path("/newDir/foo"), false));
    Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
    // Create file with a 2 component dirs recursively
    fileContextTestHelper.createFile(fc, "/newDir/newDir2/foo");
    Assert.assertTrue(isFile(fc, new Path("/newDir/newDir2/foo")));
    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
    // Delete the created file
    Assert.assertTrue(fc.delete(new Path("/newDir/newDir2/foo"), false));
    Assert.assertFalse(exists(fc, new Path("/newDir/newDir2/foo")));
    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
}
/**
 * Intentional no-op: nothing visible here requires releasing.
 * NOTE(review): confirm this implementation really holds no resources
 * (connections, publishers) that should be cleaned up on close.
 */
@Override
public void close() {
}
// Starts routing with only the local node, consumes two local rows, then adds
// a remote node at runtime and verifies its rows are delivered through the
// same handle.
@Test
public void shouldSucceed_addRemoteNode() throws ExecutionException, InterruptedException {
    // Given:
    final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>(
        ImmutableSet.of(ksqlNodeLocal));
    final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true);
    // When:
    final PushConnectionsHandle handle = handlePushRouting(routing);
    context.runOnContext(v -> {
        localPublisher.accept(LOCAL_ROW1);
        localPublisher.accept(LOCAL_ROW2);
    });
    Set<List<?>> rows = waitOnRows(2);
    // Simulate cluster growth: the node supplier now also returns the remote node.
    nodes.set(ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
    context.runOnContext(v -> {
        remotePublisher.accept(REMOTE_ROW1);
        remotePublisher.accept(REMOTE_ROW2);
    });
    // Then:
    rows.addAll(waitOnRows(2));
    handle.close();
    assertThat(rows.contains(LOCAL_ROW1.value().values()), is(true));
    assertThat(rows.contains(LOCAL_ROW2.value().values()), is(true));
    assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true));
    assertThat(rows.contains(REMOTE_ROW2.getRow().get().getColumns()), is(true));
}
/**
 * Verifies and redeems a backup receipt credential for the given account.
 *
 * Steps: verify the ZK presentation, reject expired receipts, reject
 * unrecognized receipt levels (only the media-backup level is accepted), mark
 * the serial as redeemed (rejecting replays), and finally record the resulting
 * {@code BackupVoucher} on the account.
 *
 * @throws io.grpc.StatusRuntimeException with INVALID_ARGUMENT on any
 *         verification, expiry, level, or replay failure
 */
public CompletableFuture<Void> redeemReceipt(
    final Account account,
    final ReceiptCredentialPresentation receiptCredentialPresentation) {
  try {
    serverZkReceiptOperations.verifyReceiptCredentialPresentation(receiptCredentialPresentation);
  } catch (VerificationFailedException e) {
    throw Status.INVALID_ARGUMENT
        .withDescription("receipt credential presentation verification failed")
        .asRuntimeException();
  }
  final ReceiptSerial receiptSerial = receiptCredentialPresentation.getReceiptSerial();
  final Instant receiptExpiration = Instant.ofEpochSecond(receiptCredentialPresentation.getReceiptExpirationTime());
  if (clock.instant().isAfter(receiptExpiration)) {
    throw Status.INVALID_ARGUMENT.withDescription("receipt is already expired").asRuntimeException();
  }
  final long receiptLevel = receiptCredentialPresentation.getReceiptLevel();
  if (BackupLevelUtil.fromReceiptLevel(receiptLevel) != BackupLevel.MEDIA) {
    throw Status.INVALID_ARGUMENT
        .withDescription("server does not recognize the requested receipt level")
        .asRuntimeException();
  }
  return redeemedReceiptsManager
      .put(receiptSerial, receiptExpiration.getEpochSecond(), receiptLevel, account.getUuid())
      .thenCompose(receiptAllowed -> {
        if (!receiptAllowed) {
          throw Status.INVALID_ARGUMENT
              .withDescription("receipt serial is already redeemed")
              .asRuntimeException();
        }
        return accountsManager.updateAsync(account, a -> {
          final Account.BackupVoucher newPayment = new Account.BackupVoucher(receiptLevel, receiptExpiration);
          final Account.BackupVoucher existingPayment = a.getBackupVoucher();
          // BUG FIX: mutate the updater's argument `a`, not the captured
          // `account`. The original called account.setBackupVoucher(...),
          // which leaves the instance the manager actually persists (and any
          // retried copy) unmodified while reading from `a` — an inconsistent
          // read/write pair.
          a.setBackupVoucher(merge(existingPayment, newPayment));
        });
      })
      .thenRun(Util.NOOP);
}
// A receipt whose expiration is in the past (clock pinned one second after
// expiry) must be rejected with INVALID_ARGUMENT before touching any store.
@Test
void redeemExpiredReceipt() {
    final Instant expirationTime = Instant.EPOCH.plus(Duration.ofDays(1));
    clock.pin(expirationTime.plus(Duration.ofSeconds(1)));
    final BackupAuthManager authManager = create(BackupLevel.MESSAGES, false);
    Assertions.assertThatExceptionOfType(StatusRuntimeException.class)
        .isThrownBy(() -> authManager.redeemReceipt(mock(Account.class), receiptPresentation(3, expirationTime)).join())
        .extracting(ex -> ex.getStatus().getCode())
        .isEqualTo(Status.Code.INVALID_ARGUMENT);
    // Neither the account nor the redemption ledger may be touched on rejection.
    verifyNoInteractions(accountsManager);
    verifyNoInteractions(redeemedReceiptsManager);
}
/**
 * Returns the variance of the Bernoulli distribution, Var(X) = p * (1 - p).
 * {@code q} is expected to hold the pre-computed complement (1 - p) — set at
 * construction (confirm against the field initialization).
 */
@Override
public double variance() {
    return p * q;
}
// Var = p * (1 - p) = 0.3 * 0.7 = 0.21; drawing a sample must not change it.
@Test
public void testVariance() {
    System.out.println("variance");
    BernoulliDistribution instance = new BernoulliDistribution(0.3);
    instance.rand();
    assertEquals(0.21, instance.variance(), 1E-7);
}
/**
 * Hash derived from url, delayTime, connectionTimeout, readTimeout, and
 * writeTimeout. NOTE(review): must stay consistent with the fields compared in
 * equals — confirm equals uses the same five fields.
 */
@Override
public int hashCode() {
    return Objects.hash(url, delayTime, connectionTimeout, readTimeout, writeTimeout);
}
// hashCode must be reproducible from the same five field values via Objects.hash.
@Test
public void testHashCode() {
    Assertions.assertEquals(Objects.hash(httpConfig.getUrl(), httpConfig.getDelayTime(),
            httpConfig.getConnectionTimeout(), httpConfig.getReadTimeout(),
            httpConfig.getWriteTimeout()), httpConfig.hashCode());
}
/**
 * Sets the target service name.
 *
 * @param service the service name to configure
 * @return this builder, for fluent chaining (via {@code getThis()})
 */
public MethodBuilder service(String service) {
    this.service = service;
    return getThis();
}
// The builder must propagate the configured service name into the built object.
@Test
void service() {
    MethodBuilder builder = MethodBuilder.newBuilder();
    builder.service("service");
    Assertions.assertEquals("service", builder.build().getService());
}
/**
 * Picks the name of a random reachable broker from the fault-item table.
 * The candidates are shuffled so load spreads across equally-reachable
 * brokers; returns {@code null} when the table is empty or no item is
 * currently reachable.
 */
@Override
public String pickOneAtLeast() {
    final Enumeration<FaultItem> elements = this.faultItemTable.elements();
    // ArrayList instead of LinkedList: Collections.shuffle copies non-RandomAccess
    // lists to an array first, so a random-access list is the idiomatic fit.
    final List<FaultItem> candidates = new ArrayList<>();
    while (elements.hasMoreElements()) {
        candidates.add(elements.nextElement());
    }
    if (candidates.isEmpty()) {
        return null;
    }
    Collections.shuffle(candidates);
    for (FaultItem faultItem : candidates) {
        if (faultItem.reachableFlag) {
            return faultItem.name;
        }
    }
    // Items exist but none is reachable right now.
    return null;
}
// With a single reachable fault item, pickOneAtLeast must return it despite
// the internal shuffling.
@Test
public void testPickOneAtLeast() throws Exception {
    latencyFaultTolerance.updateFaultItem(brokerName, 1000, 3000, true);
    assertThat(latencyFaultTolerance.pickOneAtLeast()).isEqualTo(brokerName);
    // Bad case, since pickOneAtLeast's behavior becomes random
    // latencyFaultTolerance.updateFaultItem(anotherBrokerName, 1001, 3000, "127.0.0.1:12011", true);
    // assertThat(latencyFaultTolerance.pickOneAtLeast()).isEqualTo(brokerName);
}
@Override public Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties) throws IOException { // Specific to Postgres: If the updated record has TOASTED columns, // we will need to keep the previous value for those columns // see https://debezium.io/documentation/reference/connectors/postgresql.html#postgresql-toasted-values Option<IndexedRecord> insertOrDeleteRecord = super.combineAndGetUpdateValue(currentValue, schema, properties); if (insertOrDeleteRecord.isPresent()) { mergeToastedValuesIfPresent(insertOrDeleteRecord.get(), currentValue); } return insertOrDeleteRecord; }
// Bootstrapped records carry no Debezium op/ts metadata (nulls); merging an
// incoming UPDATE over them must still yield the updated record.
@Test
public void testMergeWithBootstrappedExistingRecords() throws IOException {
    GenericRecord incomingRecord = createRecord(3, Operation.UPDATE, 100L);
    PostgresDebeziumAvroPayload payload = new PostgresDebeziumAvroPayload(incomingRecord, 100L);
    GenericRecord existingRecord = createRecord(3, null, null);
    Option<IndexedRecord> mergedRecord = payload.combineAndGetUpdateValue(existingRecord, avroSchema);
    validateRecord(mergedRecord, 3, Operation.UPDATE, 100L);
}
/**
 * Builds an in-memory JKS keystore from a PEM certificate chain and a PEM
 * private key, storing the key (protected by {@code keyPassword}) under the
 * fixed alias "key".
 *
 * @throws GeneralSecurityException if the key cannot be parsed or the chain is
 *         empty/invalid
 * @throws IOException on file read failures outside key parsing
 */
public static KeyStore loadKeyStore(File certificateChainFile, File privateKeyFile, String keyPassword)
        throws IOException, GeneralSecurityException {
    // Parse the private key first; collapse parser-specific failures into one type.
    final PrivateKey key;
    try {
        key = createPrivateKey(privateKeyFile, keyPassword);
    } catch (OperatorCreationException | IOException | GeneralSecurityException | PKCSException e) {
        throw new GeneralSecurityException("Private Key issues", e);
    }
    final List<X509Certificate> certificateChain = readCertificateChain(certificateChainFile);
    if (certificateChain.isEmpty()) {
        throw new CertificateException("Certificate file does not contain any certificates: " + certificateChainFile);
    }
    final KeyStore keyStore = KeyStore.getInstance("JKS");
    keyStore.load(null, null);
    final Certificate[] chain = certificateChain.toArray(new Certificate[0]);
    keyStore.setKeyEntry("key", key, keyPassword.toCharArray(), chain);
    return keyStore;
}
// A password-less PKCS#8 key plus a single-certificate chain must yield a
// one-entry keystore under the fixed alias "key".
@Test
void testParsingPKCS8WithoutPassword() throws IOException, GeneralSecurityException {
    KeyStore keystore = PEMImporter.loadKeyStore(pemCert, privkeyWithoutPasswordPKCS8, "");
    assertEquals(1, keystore.size());
    assertTrue(keystore.containsAlias("key"));
    assertEquals(1, keystore.getCertificateChain("key").length);
}
/**
 * Static factory for a handler addressing a member-owned task.
 * NOTE(review): the constant -1 passed as the second constructor argument
 * appears to be a "no partition" sentinel — confirm against
 * ScheduledTaskHandlerImpl's constructor.
 */
public static ScheduledTaskHandler of(UUID uuid, String schedulerName, String taskName) {
    return new ScheduledTaskHandlerImpl(uuid, -1, schedulerName, taskName);
}
// Passing null must be rejected eagerly with a NullPointerException.
// Note: this exercises the single-argument of(...) overload.
@Test(expected = NullPointerException.class)
public void of_withNull() {
    ScheduledTaskHandler.of(null);
}
/**
 * Derives a TaskExecutorResourceSpec from configuration.
 * Fails fast with a wrapped IllegalConfigurationException if any required
 * fine-grained resource option is missing.
 */
static TaskExecutorResourceSpec resourceSpecFromConfig(Configuration config) {
    // Validate that every required option is present before reading any of them.
    try {
        checkTaskExecutorResourceConfigSet(config);
    } catch (IllegalConfigurationException e) {
        throw new IllegalConfigurationException("Failed to create TaskExecutorResourceSpec", e);
    }
    final CPUResource cpuCores = new CPUResource(config.get(TaskManagerOptions.CPU_CORES));
    return new TaskExecutorResourceSpec(
            cpuCores,
            config.get(TaskManagerOptions.TASK_HEAP_MEMORY),
            config.get(TaskManagerOptions.TASK_OFF_HEAP_MEMORY),
            config.get(TaskManagerOptions.NETWORK_MEMORY_MIN),
            config.get(TaskManagerOptions.MANAGED_MEMORY_SIZE),
            ExternalResourceUtils.getExternalResourcesCollection(config));
}
// Every configured resource dimension (CPU, heaps, network, managed, extended)
// must round-trip into the derived spec.
@Test
void testResourceSpecFromConfig() {
    TaskExecutorResourceSpec resourceSpec =
            TaskExecutorResourceUtils.resourceSpecFromConfig(createValidConfig());
    assertThat(resourceSpec.getCpuCores()).isEqualTo(new CPUResource(CPU_CORES));
    assertThat(resourceSpec.getTaskHeapSize()).isEqualTo(TASK_HEAP);
    assertThat(resourceSpec.getTaskOffHeapSize()).isEqualTo(TASK_OFF_HEAP);
    assertThat(resourceSpec.getNetworkMemSize()).isEqualTo(NETWORK);
    assertThat(resourceSpec.getManagedMemorySize()).isEqualTo(MANAGED);
    assertThat(
            resourceSpec
                    .getExtendedResources()
                    .get(EXTERNAL_RESOURCE_NAME)
                    .getValue()
                    .longValue())
            .isEqualTo(EXTERNAL_RESOURCE_AMOUNT);
}
/**
 * Returns the list of associated CVE identifiers.
 * NOTE(review): exposes the internal list directly (no defensive copy);
 * callers can mutate this object's state through it.
 */
public List<String> getCve() {
    return cve;
}
// Placeholder kept so the IDE does not regenerate it; the getter is covered
// indirectly elsewhere. squid:S2699 suppresses the "no assertions" rule on purpose.
@Test
@SuppressWarnings("squid:S2699")
public void testGetCve() {
    //already tested, this is just left so the IDE doesn't recreate it.
}
@Override public void insert(Metric metric) throws MetricException { try { // don't bother blocking on a full queue, just drop metrics in case we can't keep up if (queue.remainingCapacity() <= 0) { LOG.info("Metrics q full, dropping metric"); return; } queue.put(metric); } catch (Exception e) { String message = "Failed to insert metric"; LOG.error(message, e); if (this.failureMeter != null) { this.failureMeter.mark(); } throw new MetricException(message, e); } }
// Inserts one ancient and one fresh metric sharing metadata strings, runs the
// cleaner with a 1-hour retention, and verifies only the fresh metric survives.
@Test
public void testMetricCleanup() throws Exception {
    FilterOptions filter;
    List<Metric> list;
    // Share some common metadata strings to validate they do not get deleted
    String commonTopologyId = "topology-cleanup-2";
    String commonStreamId = "stream-cleanup-5";
    String defaultS = "default";
    // m1's timestamp (40000000L, early 1970) is far past retention; m2 is current.
    Metric m1 = new Metric(defaultS, 40000000L, commonTopologyId, 1.0, "component-1", defaultS,
            "hostname-1", commonStreamId, 1, AggLevel.AGG_LEVEL_NONE);
    Metric m2 = new Metric(defaultS, System.currentTimeMillis(), commonTopologyId, 1.0, "component-1",
            "executor-1", defaultS, commonStreamId, 1, AggLevel.AGG_LEVEL_NONE);
    store.insert(m1);
    store.insert(m2);
    waitForInsertFinish(m2);
    // validate at least two agg level none metrics exist
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    list = getMetricsFromScan(filter);
    assertTrue(list.size() >= 2);
    // delete anything older than an hour
    MetricsCleaner cleaner = new MetricsCleaner((RocksDbStore)store, 1, 1, null, new StormMetricsRegistry());
    cleaner.purgeMetrics();
    list = getMetricsFromScan(filter);
    assertEquals(1, list.size());
    assertTrue(list.contains(m2));
}
/**
 * Orders transactions so any transaction that spends another's output appears
 * AFTER that other transaction.
 *
 * Algorithm: for each position i, while the element at i spends some later
 * element at j, remove it from i and re-insert at index j (which, after the
 * removal shifts elements left, places it immediately after the transaction it
 * depends on), then re-examine the new occupant of i.
 *
 * NOTE(review): a cyclic spend relation would never terminate — confirm inputs
 * are acyclic. Worst case is super-quadratic, acceptable for wallet-sized sets.
 *
 * @param inputSet transactions to order (set iteration order is irrelevant)
 * @return a new list in dependency order
 */
List<Transaction> sortTxnsByDependency(Set<Transaction> inputSet) {
    List<Transaction> result = new ArrayList<>(inputSet);
    for (int i = 0; i < result.size()-1; i++) {
        boolean txAtISpendsOtherTxInTheList;
        do {
            txAtISpendsOtherTxInTheList = false;
            for (int j = i+1; j < result.size(); j++) {
                if (spends(result.get(i), result.get(j))) {
                    // Move the spender to sit just after the tx it depends on.
                    Transaction transactionAtI = result.remove(i);
                    result.add(j, transactionAtI);
                    txAtISpendsOtherTxInTheList = true;
                    break;
                }
            }
        } while (txAtISpendsOtherTxInTheList);
    }
    return result;
}
// Builds two chains of dependent spends (send1a..e each spending the previous
// change; send2a..c explicitly chaining change outputs), shuffles them into a
// HashSet, and verifies the sort restores dependency order within each chain.
@Test
public void sortTxnsByDependency() throws Exception {
    Transaction send1 = sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, valueOf(2, 0));
    Transaction send1a = Objects.requireNonNull(wallet.createSend(OTHER_ADDRESS, valueOf(1, 0), true));
    wallet.commitTx(send1a);
    Transaction send1b = Objects.requireNonNull(wallet.createSend(OTHER_ADDRESS, valueOf(0, 50), true));
    wallet.commitTx(send1b);
    Transaction send1c = Objects.requireNonNull(wallet.createSend(OTHER_ADDRESS, valueOf(0, 25), true));
    wallet.commitTx(send1c);
    Transaction send1d = Objects.requireNonNull(wallet.createSend(OTHER_ADDRESS, valueOf(0, 12), true));
    wallet.commitTx(send1d);
    Transaction send1e = Objects.requireNonNull(wallet.createSend(OTHER_ADDRESS, valueOf(0, 06), true));
    wallet.commitTx(send1e);
    Transaction send2 = sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, valueOf(200, 0));
    // Second chain: each request explicitly spends the previous tx's change output.
    SendRequest req2a = SendRequest.to(OTHER_ADDRESS, valueOf(100, 0));
    req2a.tx.addInput(send2.getOutput(0));
    req2a.shuffleOutputs = false;
    wallet.completeTx(req2a);
    Transaction send2a = req2a.tx;
    SendRequest req2b = SendRequest.to(OTHER_ADDRESS, valueOf(50, 0));
    req2b.tx.addInput(send2a.getOutput(1));
    req2b.shuffleOutputs = false;
    wallet.completeTx(req2b);
    Transaction send2b = req2b.tx;
    SendRequest req2c = SendRequest.to(OTHER_ADDRESS, valueOf(25, 0));
    req2c.tx.addInput(send2b.getOutput(1));
    req2c.shuffleOutputs = false;
    wallet.completeTx(req2c);
    Transaction send2c = req2c.tx;
    // HashSet gives an effectively arbitrary input order to the sorter.
    Set<Transaction> unsortedTxns = new HashSet<>();
    unsortedTxns.add(send1a);
    unsortedTxns.add(send1b);
    unsortedTxns.add(send1c);
    unsortedTxns.add(send1d);
    unsortedTxns.add(send1e);
    unsortedTxns.add(send2a);
    unsortedTxns.add(send2b);
    unsortedTxns.add(send2c);
    List<Transaction> sortedTxns = wallet.sortTxnsByDependency(unsortedTxns);
    assertEquals(8, sortedTxns.size());
    assertTrue(sortedTxns.indexOf(send1a) < sortedTxns.indexOf(send1b));
    assertTrue(sortedTxns.indexOf(send1b) < sortedTxns.indexOf(send1c));
    assertTrue(sortedTxns.indexOf(send1c) < sortedTxns.indexOf(send1d));
    assertTrue(sortedTxns.indexOf(send1d) < sortedTxns.indexOf(send1e));
    assertTrue(sortedTxns.indexOf(send2a) < sortedTxns.indexOf(send2b));
    assertTrue(sortedTxns.indexOf(send2b) < sortedTxns.indexOf(send2c));
}
@Override public double getEle(double lat, double lon) { // Sometimes the cgiar data north of 59.999 equals 0 if (lat < 59.999 && lat > -56) { return srtmProvider.getEle(lat, lon); } return globalProvider.getEle(lat, lon); }
// Disabled (requires downloading real elevation tiles). First half mirrors the
// SRTMGL1 provider test; second half the GMTED provider test (outside SRTM band).
@Disabled
@Test
public void testGetEle() {
    instance = new MultiSourceElevationProvider();
    double precision = .1;
    // The first part is copied from the SRTMGL1ProviderTest
    assertEquals(338, instance.getEle(49.949784, 11.57517), precision);
    assertEquals(468, instance.getEle(49.968668, 11.575127), precision);
    assertEquals(467, instance.getEle(49.968682, 11.574842), precision);
    assertEquals(3110, instance.getEle(-22.532854, -65.110474), precision);
    assertEquals(120, instance.getEle(38.065392, -87.099609), precision);
    assertEquals(1617, instance.getEle(40, -105.2277023), precision);
    assertEquals(1617, instance.getEle(39.99999999, -105.2277023), precision);
    assertEquals(1617, instance.getEle(39.9999999, -105.2277023), precision);
    assertEquals(1617, instance.getEle(39.999999, -105.2277023), precision);
    assertEquals(1015, instance.getEle(47.468668, 14.575127), precision);
    assertEquals(1107, instance.getEle(47.467753, 14.573911), precision);
    assertEquals(1930, instance.getEle(46.468835, 12.578777), precision);
    assertEquals(844, instance.getEle(48.469123, 9.576393), precision);
    // The file for this coordinate does not exist, but there is a ferry tagged in OSM
    assertEquals(0, instance.getEle(56.4787319, 17.6118363), precision);
    assertEquals(0, instance.getEle(56.4787319, 17.6118363), precision);
    // The second part is copied from the GMTEDProviderTest
    // Outside of SRTM covered area
    assertEquals(108, instance.getEle(60.0000001, 16), precision);
    assertEquals(0, instance.getEle(60.0000001, 19), precision);
    // Stor Roten
    assertEquals(14, instance.getEle(60.251, 18.805), precision);
}
/**
 * Checks whether the given address lies within [startAddress, endAddress],
 * inclusive, comparing raw address bytes as unsigned big-endian integers
 * (works for both IPv4 and IPv6).
 *
 * @param ipAddress textual IP address (or host name, which gets resolved)
 * @return true if startAddress &lt;= ipAddress &lt;= endAddress
 * @throws UnknownHostException if the address cannot be parsed or resolved
 */
public boolean isInRange(String ipAddress) throws UnknownHostException {
    InetAddress address = InetAddress.getByName(ipAddress);
    // Signum 1 forces an unsigned interpretation of the raw address bytes.
    BigInteger start = new BigInteger(1, this.startAddress.getAddress());
    BigInteger end = new BigInteger(1, this.endAddress.getAddress());
    BigInteger target = new BigInteger(1, address.getAddress());
    // Rely only on the SIGN of compareTo, never on the exact values -1/0/1:
    // the Comparable contract guarantees only negative/zero/positive.
    return start.compareTo(target) <= 0 && target.compareTo(end) <= 0;
}
@Test
void testIpv4() throws UnknownHostException {
    // /26 → 64-address block: 192.168.1.0 – 192.168.1.63 inclusive.
    CIDRUtils cidrUtils = new CIDRUtils("192.168.1.0/26");
    Assertions.assertTrue(cidrUtils.isInRange("192.168.1.63"));
    Assertions.assertFalse(cidrUtils.isInRange("192.168.1.65"));
    // /26 starting at .192 → 192.168.1.192 – 192.168.1.255.
    cidrUtils = new CIDRUtils("192.168.1.192/26");
    Assertions.assertTrue(cidrUtils.isInRange("192.168.1.199"));
    Assertions.assertFalse(cidrUtils.isInRange("192.168.1.190"));
}
/**
 * Returns the stored byte value, or null if none has been set.
 * NOTE(review): returns the internal buffer, not a copy — caller mutations
 * are visible to this object.
 */
@Nullable
public byte[] getValue() {
    return mValue;
}
// Writing 0x01020304 as UINT32 little-endian must store the bytes
// least-significant first.
@Test
public void setValue_UINT32() {
    final MutableData data = new MutableData(new byte[4]);
    data.setValue(0x01020304, Data.FORMAT_UINT32_LE, 0);
    assertArrayEquals(new byte[] { 0x04, 0x03, 0x02, 0x01 } , data.getValue());
}
/**
 * Probes whether every storage node in the application's content clusters is
 * permitted to enter MAINTENANCE, i.e. the application is quiescent.
 *
 * Best-effort: any refusal, probe exception, or an unknown application id
 * yields {@code false}. NOTE(review): the batch-probe context suggests
 * trySetNodeState is a permission probe rather than a committed state change
 * — confirm against OrchestratorContext/ClusterControllerClient semantics.
 */
@Override
public boolean isQuiescent(ApplicationId id) {
    try {
        ApplicationInstance application =
                serviceMonitor.getApplication(OrchestratorUtil.toApplicationInstanceReference(id, serviceMonitor))
                              .orElseThrow(ApplicationIdNotFoundException::new);
        // Only content clusters matter for maintenance quiescence.
        List<ServiceCluster> contentClusters = application.serviceClusters().stream()
                .filter(VespaModelUtil::isContent)
                .toList();
        // For all content clusters, probe whether maintenance is OK.
        OrchestratorContext context = OrchestratorContext.createContextForBatchProbe(clock);
        for (ServiceCluster cluster : contentClusters) {
            List<HostName> clusterControllers =
                    VespaModelUtil.getClusterControllerInstancesInOrder(application, cluster.clusterId());
            ClusterControllerClient client =
                    clusterControllerClientFactory.createClient(clusterControllers, cluster.clusterId().s());
            for (ServiceInstance service : cluster.serviceInstances()) {
                try {
                    // A single refusal means the application is not quiescent.
                    if ( ! client.trySetNodeState(context,
                                                  service.hostName(),
                                                  VespaModelUtil.getStorageNodeIndex(service.configId()),
                                                  MAINTENANCE,
                                                  ContentService.STORAGE_NODE,
                                                  false))
                        return false;
                } catch (Exception e) {
                    log.log(Level.INFO, "Failed probing for permission to set " + service + " in MAINTENANCE: "
                            + Exceptions.toMessageString(e));
                    return false;
                }
            }
        }
        return true;
    } catch (ApplicationIdNotFoundException ignored) {
        return false;
    }
}
/**
 * isQuiescent() is true only while every storage node of every content cluster can be
 * probed into MAINTENANCE; a single denied or throwing probe makes it false.
 */
@Test
public void testIsQuiescent() {
    StatusService statusService = new ZkStatusService(new MockCurator(),
            mock(Metric.class), new TestTimer(), new DummyAntiServiceMonitor());
    HostName hostName = new HostName("my.host");
    HostName ccHost = new HostName("cc.host");
    TenantId tenantId = new TenantId("tenant");
    ApplicationInstanceId applicationInstanceId = new ApplicationInstanceId("app:env:region:instance");
    ApplicationInstanceReference reference = new ApplicationInstanceReference(tenantId, applicationInstanceId);
    ApplicationId id = ApplicationId.from("tenant", "app", "instance");
    // Application model: "foo" (STORAGE) and "bar" (SEARCH) clusters, each with two
    // storage nodes, plus a standalone cluster-controller cluster on ccHost.
    ApplicationInstance applicationInstance = new ApplicationInstance(tenantId,
            applicationInstanceId,
            Set.of(new ServiceCluster(new ClusterId("foo"), ServiceType.STORAGE,
                            Set.of(new ServiceInstance(new ConfigId("foo/storage/1"), hostName, ServiceStatus.UP),
                                    new ServiceInstance(new ConfigId("foo/storage/2"), hostName, ServiceStatus.UP))),
                    new ServiceCluster(new ClusterId("bar"), ServiceType.SEARCH,
                            Set.of(new ServiceInstance(new ConfigId("bar/storage/0"), hostName, ServiceStatus.UP),
                                    new ServiceInstance(new ConfigId("bar/storage/3"), hostName, ServiceStatus.UP))),
                    new ServiceCluster(new ClusterId("cluster-controllers"), ServiceType.CLUSTER_CONTROLLER,
                            Set.of(new ServiceInstance(new ConfigId("what/standalone/cluster-controllers/0"), ccHost, ServiceStatus.UP)))));
    ServiceMonitor serviceMonitor = () -> new ServiceModel(Map.of(reference, applicationInstance));
    ClusterControllerClientFactory clusterControllerClientFactory = mock(ClusterControllerClientFactory.class);
    ClusterControllerClient fooClient = mock(ClusterControllerClient.class);
    ClusterControllerClient barClient = mock(ClusterControllerClient.class);
    when(clusterControllerClientFactory.createClient(List.of(ccHost), "foo")).thenReturn(fooClient);
    when(clusterControllerClientFactory.createClient(List.of(ccHost), "bar")).thenReturn(barClient);
    orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource,
            zone), clusterControllerClientFactory, applicationApiFactory, flagSource),
            clusterControllerClientFactory, statusService, serviceMonitor, 0,
            new ManualClock(), applicationApiFactory, flagSource);
    // All four storage-node probes permitted -> quiescent.
    when(fooClient.trySetNodeState(any(), any(), eq(1), eq(ClusterControllerNodeState.MAINTENANCE), eq(ContentService.STORAGE_NODE), eq(false))).thenReturn(true);
    when(fooClient.trySetNodeState(any(), any(), eq(2), eq(ClusterControllerNodeState.MAINTENANCE), eq(ContentService.STORAGE_NODE), eq(false))).thenReturn(true);
    when(barClient.trySetNodeState(any(), any(), eq(0), eq(ClusterControllerNodeState.MAINTENANCE), eq(ContentService.STORAGE_NODE), eq(false))).thenReturn(true);
    when(barClient.trySetNodeState(any(), any(), eq(3), eq(ClusterControllerNodeState.MAINTENANCE), eq(ContentService.STORAGE_NODE), eq(false))).thenReturn(true);
    assertTrue(orchestrator.isQuiescent(id));
    // One denied probe -> not quiescent.
    when(fooClient.trySetNodeState(any(), any(), eq(2), eq(ClusterControllerNodeState.MAINTENANCE), eq(ContentService.STORAGE_NODE), eq(false))).thenReturn(false);
    assertFalse(orchestrator.isQuiescent(id));
    // One throwing probe -> also not quiescent.
    when(fooClient.trySetNodeState(any(), any(), eq(2), eq(ClusterControllerNodeState.MAINTENANCE), eq(ContentService.STORAGE_NODE), eq(false))).thenThrow(new RuntimeException());
    assertFalse(orchestrator.isQuiescent(id));
}
/**
 * Serves the job-execution-result request: returns the final result once the job has
 * reached a globally terminal state, otherwise an "in progress" body so the client
 * can poll again. Failures (e.g. unknown job) are mapped via propagateException.
 */
@Override
protected CompletableFuture<JobExecutionResultResponseBody> handleRequest(
        @Nonnull final HandlerRequest<EmptyRequestBody> request,
        @Nonnull final RestfulGateway gateway)
        throws RestHandlerException {
    final JobID jobId = request.getPathParameter(JobIDPathParameter.class);
    final CompletableFuture<JobStatus> jobStatusFuture = gateway.requestJobStatus(jobId, timeout);
    return jobStatusFuture
            .thenCompose(
                    jobStatus -> {
                        if (jobStatus.isGloballyTerminalState()) {
                            // Job finished: fetch the actual result asynchronously.
                            return gateway.requestJobResult(jobId, timeout)
                                    .thenApply(JobExecutionResultResponseBody::created);
                        } else {
                            // Job still running: respond immediately with "in progress".
                            return CompletableFuture.completedFuture(
                                    JobExecutionResultResponseBody.inProgress());
                        }
                    })
            .exceptionally(
                    throwable -> {
                        // Convert any upstream failure into the handler's exception type;
                        // the future completes exceptionally with it.
                        throw propagateException(throwable);
                    });
}
/**
 * A FlinkJobNotFoundException from requestJobStatus must surface to the caller as a
 * RestHandlerException carrying HTTP 404 (NOT_FOUND).
 */
@Test
void testPropagateFlinkJobNotFoundExceptionAsRestHandlerException() throws Exception {
    // Gateway whose status lookup always fails with "job not found".
    final TestingRestfulGateway testingRestfulGateway =
            new TestingRestfulGateway.Builder()
                    .setRequestJobStatusFunction(
                            jobId ->
                                    FutureUtils.completedExceptionally(
                                            new FlinkJobNotFoundException(jobId)))
                    .build();
    assertThatFuture(
                    jobExecutionResultHandler.handleRequest(testRequest, testingRestfulGateway))
            .eventuallyFailsWith(ExecutionException.class)
            .withCauseInstanceOf(RestHandlerException.class)
            .satisfies(
                    e ->
                            assertThat(
                                            ((RestHandlerException) e.getCause())
                                                    .getHttpResponseStatus())
                                    .isEqualTo(HttpResponseStatus.NOT_FOUND));
}
/**
 * Returns the roles assigned to the given user, or an empty set if the user has no
 * explicit role mapping.
 */
public Set<String> getRolesBy(String userName) {
    return _rolesPerUsers.containsKey(userName)
            ? _rolesPerUsers.get(userName)
            : Collections.emptySet();
}
/** Each configured test user must resolve to exactly its configured role set. */
@Test
public void testGetRolesPerUsers() {
    KafkaCruiseControlConfig config = setupConfigurations(true);
    UserPermissionsManager permissionsManager = new UserPermissionsManager(config);
    HashSet<String> adminRoles = new HashSet<>(Arrays.asList("ADMIN", "USER"));
    assertEquals(adminRoles, permissionsManager.getRolesBy("ccTestAdmin"));
    HashSet<String> userRoles = new HashSet<>(Arrays.asList("USER", "VIEWER"));
    assertEquals(userRoles, permissionsManager.getRolesBy("ccTestUser"));
    // Single-role users.
    assertEquals(Collections.singleton("VIEWER"), permissionsManager.getRolesBy("ccTestUser2"));
    assertEquals(Collections.singleton("ADMIN"), permissionsManager.getRolesBy("kafka"));
}
/**
 * Writes {@code dataToSave} to the target described by {@code fileAbstraction}.
 * CONTENT URIs are written through the ContentResolver, falling back to a raw file
 * path when the app's own document is not writable via DocumentFile; FILE targets are
 * written via {@code openFile}. If a cached copy exists afterwards, its contents are
 * concatenated onto the destination and the cache file is deleted.
 *
 * @return {@link Unit#INSTANCE} on success; {@code null} if the host Context is gone
 */
@WorkerThread
@Override
public Unit call() throws IOException, StreamNotFoundException, ShellNotRunningException, IllegalArgumentException {
    OutputStream outputStream;
    File destFile = null;
    switch (fileAbstraction.scheme) {
        case CONTENT:
            Objects.requireNonNull(fileAbstraction.uri);
            if (fileAbstraction.uri.getAuthority().equals(context.get().getPackageName())) {
                // URI is served by this app itself: prefer SAF if the document is
                // writable, otherwise resolve to a plain file and open it directly.
                DocumentFile documentFile = DocumentFile.fromSingleUri(AppConfig.getInstance(), fileAbstraction.uri);
                if (documentFile != null && documentFile.exists() && documentFile.canWrite()) {
                    // "wt" = truncate-then-write mode.
                    outputStream = contentResolver.openOutputStream(fileAbstraction.uri, "wt");
                } else {
                    destFile = FileUtils.fromContentUri(fileAbstraction.uri);
                    outputStream = openFile(destFile, context.get());
                }
            } else {
                // Foreign authority: write through the ContentResolver.
                outputStream = contentResolver.openOutputStream(fileAbstraction.uri, "wt");
            }
            break;
        case FILE:
            final HybridFileParcelable hybridFileParcelable = fileAbstraction.hybridFileParcelable;
            Objects.requireNonNull(hybridFileParcelable);
            Context context = this.context.get();
            if (context == null) {
                // Host context has been garbage-collected; abort quietly.
                return null;
            }
            outputStream = openFile(hybridFileParcelable.getFile(), context);
            destFile = fileAbstraction.hybridFileParcelable.getFile();
            break;
        default:
            // Any scheme other than CONTENT or FILE is unsupported.
            throw new IllegalArgumentException(
                    "The scheme for '" + fileAbstraction.scheme + "' cannot be processed!");
    }
    Objects.requireNonNull(outputStream);
    // NOTE(review): getBytes() uses the platform default charset here — presumably
    // UTF-8 on Android; confirm if this code ever runs elsewhere.
    outputStream.write(dataToSave.getBytes());
    outputStream.close();
    if (cachedFile != null && cachedFile.exists() && destFile != null) {
        // cat cache content to original file and delete cache file
        ConcatenateFileCommand.INSTANCE.concatenateFile(cachedFile.getPath(), destFile.getPath());
        cachedFile.delete();
    }
    return Unit.INSTANCE;
}
/**
 * call() must reject a URI it cannot classify with IllegalArgumentException.
 * Uses an ftp:// URI — presumably EditableFileAbstraction maps this to a scheme that
 * is neither CONTENT nor FILE; confirm against that class.
 */
@Test(expected = IllegalArgumentException.class)
public void testWriteBogeyUri() throws ShellNotRunningException, IOException, StreamNotFoundException {
    Uri uri = Uri.parse("ftp://bogey.ftp/test.txt");
    Context ctx = ApplicationProvider.getApplicationContext();
    ContentResolver cr = ctx.getContentResolver();
    // Register a sink so that, if the scheme were accepted, the write would have a target.
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    shadowOf(cr).registerOutputStream(uri, bout);
    WriteTextFileCallable task = new WriteTextFileCallable(
            ctx, cr, new EditableFileAbstraction(ctx, uri), contents, null, false);
    task.call();
}
/**
 * Builds the lower-cased elevation-tile file name for the tile containing the given
 * coordinate, e.g. "30n000e" + suffix.
 */
String getFileName(double lat, double lon) {
    final int tileMinLon = getMinLonForTile(lon);
    final int tileMinLat = getMinLatForTile(lat);
    final String rawName = getLatString(tileMinLat) + getNorthString(tileMinLat)
            + getLonString(tileMinLon) + getEastString(tileMinLon) + FILE_NAME_END;
    return toLowerCase(rawName);
}
/**
 * Tile-name resolution across all four hemisphere quadrants: the prefix encodes the
 * tile's minimum latitude (n/s) and minimum longitude (e/w).
 */
@Test
public void testGetFileName() {
    // Northern/eastern hemisphere.
    assertEquals("30n000e_20101117_gmted_mea075", instance.getFileName(42.940339, 11.953125));
    // Northern/western hemisphere.
    assertEquals("30n090w_20101117_gmted_mea075", instance.getFileName(38.548165, -77.167969));
    // Far north, far west.
    assertEquals("70n180w_20101117_gmted_mea075", instance.getFileName(74.116047, -169.277344));
    // Southern/western hemisphere.
    assertEquals("70s180w_20101117_gmted_mea075", instance.getFileName(-61.015725, -156.621094));
    // Far north, far east.
    assertEquals("70n150e_20101117_gmted_mea075", instance.getFileName(74.590108, 166.640625));
    // Southern/eastern hemisphere.
    assertEquals("70s150e_20101117_gmted_mea075", instance.getFileName(-61.015725, 162.949219));
}
/**
 * Installs the given content pack, dispatching to the V1 implementation. Only V1
 * content packs are supported.
 *
 * @throws IllegalArgumentException if the content pack is not a V1 pack
 */
public ContentPackInstallation installContentPack(ContentPack contentPack, Map<String, ValueReference> parameters, String comment, String user) {
    // Guard clause: reject anything that is not a V1 pack up front.
    if (!(contentPack instanceof ContentPackV1 contentPackV1)) {
        throw new IllegalArgumentException("Unsupported content pack version: " + contentPack.version());
    }
    return installContentPack(contentPackV1, parameters, comment, user);
}
/**
 * Installing a pack containing a GELF-UDP input entity: in non-cloud mode the entity
 * is installed (1 entity recorded); in cloud mode the installation records no entities.
 */
@Test
public void installContentPackWithCloudCheck() throws Exception {
    ImmutableSet<Entity> entities = ImmutableSet.of(createTestGelfUDPEntity());
    ContentPackV1 contentPack = ContentPackV1.builder()
            .description("test")
            .entities(entities)
            .name("test")
            .revision(1)
            .summary("")
            .vendor("")
            .url(URI.create("http://graylog.com"))
            .id(ModelId.of("dead-beef"))
            .build();
    // Stub the input pipeline so installation can resolve the created input.
    Input input = mock(Input.class);
    GELFUDPInput gelfUDPInput = mock(GELFUDPInput.class);
    when(messageInputFactory.create(any(), any())).thenReturn(gelfUDPInput);
    when(inputService.find(any())).thenReturn(input);
    when(input.getId()).thenReturn("id1");
    when(input.getTitle()).thenReturn("myGelfUDP");
    // Capture what gets persisted so the entity count can be asserted per mode.
    ArgumentCaptor<ContentPackInstallation> captor = ArgumentCaptor.forClass(ContentPackInstallation.class);
    when(contentPackInstallService.insert(captor.capture())).thenReturn(null);
    // Non-cloud: the input entity is installed.
    when(configuration.isCloud()).thenReturn(false);
    contentPackService.installContentPack(contentPack, Collections.emptyMap(), "", TEST_USER);
    assertThat(captor.getValue().entities()).hasSize(1);
    // Cloud: the same pack installs with no entities.
    when(configuration.isCloud()).thenReturn(true);
    contentPackService.installContentPack(contentPack, Collections.emptyMap(), "", TEST_USER);
    assertThat(captor.getValue().entities()).isEmpty();
}
protected static void processPostRequest(InputStream in, CommandRequest request) throws RequestException, IOException { Map<String, String> headerMap = parsePostHeaders(in); if (headerMap == null) { // illegal request CommandCenterLog.warn("Illegal request read: null headerMap"); throw new RequestException(StatusCode.BAD_REQUEST, ""); } if (headerMap.containsKey("content-type") && !checkContentTypeSupported(headerMap.get("content-type"))) { // not supported Content-type CommandCenterLog.warn("Request not supported: unsupported Content-Type: " + headerMap.get("content-type")); throw new RequestException(StatusCode.UNSUPPORTED_MEDIA_TYPE, "Only form-encoded post request is supported"); } int bodyLength = 0; try { bodyLength = Integer.parseInt(headerMap.get("content-length")); } catch (Exception e) { } if (bodyLength < 1) { // illegal request without Content-length header CommandCenterLog.warn("Request not supported: no available Content-Length in headers"); throw new RequestException(StatusCode.LENGTH_REQUIRED, "No legal Content-Length"); } parseParams(readBody(in, bodyLength), request); }
/**
 * Exercises processPostRequest end to end: an empty stream is rejected (and leaves the
 * request untouched), a well-formed form-encoded body is parsed including multi-byte
 * UTF-8 characters, a JSON Content-Type gets 415, an oversized body whose declared
 * Content-Length covers only padding yields no parameters, and URL-decoding ('+' and
 * %-escapes) works after ~1MB of '&' padding.
 */
@Test
public void processPostRequest() throws IOException {
    CommandRequest request;
    request = new CommandRequest();
    request.addParam("a", "1");
    // illegal(empty) request
    try {
        HttpEventTask.processPostRequest(new ByteArrayInputStream("".getBytes()), request);
        assertFalse(true); // should not reach here
    } catch (Exception e) {
        assertTrue(e instanceof RequestException);
    }
    // Pre-existing parameter must survive the failed parse.
    assertEquals("1", request.getParam("a"));
    // normal request
    try {
        HttpEventTask.processPostRequest(new ByteArrayInputStream(("Host: demo.com\r\n"
                + "Accept: */*\r\n"
                + "Accept-Language: en-us\r\n"
                + "Accept-Encoding: gzip, deflate\r\n"
                + "Content-Type: application/x-www-form-urlencoded; charset=UTF-8\r\n"
                + "Connection: keep-alive\r\n"
                + "Content-Length: 10\r\n"
                + "\r\n"
                + "a=3&b=5็š„").getBytes()), request);
        assertEquals("3", request.getParam("a"));
        assertEquals("5็š„", request.getParam("b"));
    } catch (Exception e) {
        assertTrue(false); // should not reach here
    }
    // not supported request
    try {
        HttpEventTask.processPostRequest(new ByteArrayInputStream(("Host: demo.com\r\n"
                + "Accept: */*\r\n"
                + "Accept-Language: en-us\r\n"
                + "Accept-Encoding: gzip, deflate\r\n"
                + "Content-Type: application/json\r\n"
                + "Connection: keep-alive\r\n"
                + "Content-Length: 7\r\n"
                + "\r\n"
                + "a=1&b=2").getBytes()), request);
        assertTrue(false); // should not reach here
    } catch (RequestException e) {
        assertTrue(e.getStatusCode() == StatusCode.UNSUPPORTED_MEDIA_TYPE);
    }
    // Capacity test
    char[] buf = new char[1024 * 1024];
    Arrays.fill(buf, '&');
    String padding = new String(buf);
    try {
        request = new CommandRequest();
        // Declared Content-Length (7) covers only padding bytes, so no params parse.
        HttpEventTask.processPostRequest(new ByteArrayInputStream(("Host: demo.com\r\n"
                + "Accept: */*\r\n"
                + "Accept-Language: en-us\r\n"
                + "Accept-Encoding: gzip, deflate\r\n"
                + "Content-Type: application/x-www-form-urlencoded\r\n"
                + "Connection: keep-alive\r\n"
                + "Content-Length: 7\r\n"
                + "\r\n"
                + padding + "a=1&b=2").getBytes()), request);
        assertEquals(0, request.getParameters().size());
    } catch (Exception e) {
        assertTrue(false);
    }
    try {
        // URL decoding: '+' -> space, %E7%9A%84 -> UTF-8 encoded character.
        String querystring = "a+=+&b=%E7%9A%84็š„";
        request = new CommandRequest();
        HttpEventTask.processPostRequest(new ByteArrayInputStream(("Host: demo.com\r\n"
                + "Accept: */*\r\n"
                + "Accept-Language: en-us\r\n"
                + "Accept-Encoding: gzip, deflate\r\n"
                + "Content-Type: application/x-www-form-urlencoded\r\n"
                + "Connection: keep-alive\r\n"
                + "Content-Length: " + (padding.length() + querystring.getBytes().length) + "\r\n"
                + "\r\n"
                + padding + querystring).getBytes()), request);
        assertEquals(2, request.getParameters().size());
        assertEquals(" ", request.getParam("a "));
        assertEquals("็š„็š„", request.getParam("b"));
    } catch (Exception e) {
        assertTrue(false);
    }
}
/**
 * Attempts to acquire the job lock by atomically creating the lock file for this
 * job's JobTracker identifier under the locks directory.
 *
 * @return {@code true} if the lock file was created (lock acquired), {@code false} otherwise
 */
@Override
public boolean tryAcquireJobLock(Configuration conf) {
    final String lockFileName = String.format(LOCKS_DIR_JOB_FILENAME, getJobJtIdentifier(conf));
    final Path lockPath = new Path(locksDir, lockFileName);
    return tryCreateFile(conf, lockPath);
}
/** The job lock is exclusive: only the first acquisition after each release succeeds. */
@Test
public void tryAcquireJobLockTest() {
    // Three consecutive attempts while the lock is held.
    boolean acquiredFirst = tested.tryAcquireJobLock(configuration);
    boolean acquiredSecond = tested.tryAcquireJobLock(configuration);
    boolean acquiredThird = tested.tryAcquireJobLock(configuration);
    assertTrue(isFileExists(getJobLockPath()));
    // After release, exactly one further acquisition may succeed.
    tested.releaseJobIdLock(configuration);
    boolean acquiredFourth = tested.tryAcquireJobLock(configuration);
    boolean acquiredFifth = tested.tryAcquireJobLock(configuration);
    assertTrue(acquiredFirst);
    assertFalse(acquiredSecond);
    assertFalse(acquiredThird);
    assertTrue(acquiredFourth);
    assertFalse(acquiredFifth);
}