focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Builds one XML document per input row from the configured output fields and
 * appends the serialized XML as an extra String field on the outgoing row.
 *
 * @param smi step meta, cast to {@code AddXMLMeta}
 * @param sdi step data, cast to {@code AddXMLData}
 * @return true while more rows may follow, false once the input is exhausted
 * @throws KettleException if a configured field is missing from the input row
 *         meta, an element name resolves to empty, or XML serialization fails
 */
public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
  meta = (AddXMLMeta) smi;
  data = (AddXMLData) sdi;

  Object[] r = getRow(); // This also waits for a row to be finished.
  if ( r == null ) {
    // no more input to be expected...
    setOutputDone();
    return false;
  }

  if ( first ) {
    first = false;

    // Derive the output row layout once, on the first row.
    data.outputRowMeta = getInputRowMeta().clone();
    meta.getFields( data.outputRowMeta, getStepname(), null, null, this, repository, metaStore );

    // Cache the field name indexes
    //
    data.fieldIndexes = new int[meta.getOutputFields().length];
    for ( int i = 0; i < data.fieldIndexes.length; i++ ) {
      String fieldsName = meta.getOutputFields()[i].getFieldName();
      data.fieldIndexes[i] = getInputRowMeta().indexOfValue( fieldsName );
      if ( data.fieldIndexes[i] < 0 ) {
        // Configured output field is not present in the incoming row layout.
        throw new KettleException( BaseMessages.getString( PKG, "AddXML.Exception.FieldNotFound", fieldsName ) );
      }
    }
  }

  // Fresh DOM document with the configured root element for every row.
  Document xmldoc = getDomImplentation().createDocument( null, meta.getRootNode(), null );
  Element root = xmldoc.getDocumentElement();

  for ( int i = 0; i < meta.getOutputFields().length; i++ ) {
    XMLField outputField = meta.getOutputFields()[i];
    String fieldname = outputField.getFieldName();
    ValueMetaInterface v = getInputRowMeta().getValueMeta( data.fieldIndexes[i] );
    Object valueData = r[data.fieldIndexes[i]];

    // Null values are skipped entirely when "omit null values" is enabled.
    if ( !meta.isOmitNullValues() || !v.isNull( valueData ) ) {
      String value = formatField( v, valueData, outputField );

      // The element name falls back to the field name when not set explicitly.
      String element = outputField.getElementName();
      if ( element == null || element.length() == 0 ) {
        element = fieldname;
      }
      if ( element == null || element.length() == 0 ) {
        throw new KettleException( "XML does not allow empty strings for element names." );
      }
      if ( outputField.isAttribute() ) {
        // Attach as an attribute: either on the root, or on the first element
        // matching the configured parent name (falling back to the root).
        String attributeParentName = outputField.getAttributeParentName();

        Element node;

        if ( attributeParentName == null || attributeParentName.length() == 0 ) {
          node = root;
        } else {
          NodeList nodelist = root.getElementsByTagName( attributeParentName );
          if ( nodelist.getLength() > 0 ) {
            node = (Element) nodelist.item( 0 );
          } else {
            node = root;
          }
        }
        node.setAttribute( element, value );
      } else { /* encode as subnode */
        if ( !element.equals( meta.getRootNode() ) ) {
          Element e = xmldoc.createElement( element );
          Node n = xmldoc.createTextNode( value );
          e.appendChild( n );
          root.appendChild( e );
        } else {
          // Field mapped onto the root node itself: append text content only.
          Node n = xmldoc.createTextNode( value );
          root.appendChild( n );
        }
      }
    }
  }

  // Serialize the DOM document to a string and append it to the row.
  StringWriter sw = new StringWriter();
  DOMSource domSource = new DOMSource( xmldoc );
  try {
    this.getSerializer().transform( domSource, new StreamResult( sw ) );
  } catch ( TransformerException e ) {
    throw new KettleException( e );
  } catch ( Exception e ) {
    throw new KettleException( e );
  }

  Object[] outputRowData = RowDataUtil.addValueData( r, getInputRowMeta().size(), sw.toString() );

  putRow( data.outputRowMeta, outputRowData );

  return true;
}
/**
 * Happy-path test for {@code AddXML#processRow}: feeds a single source row and
 * expects a successful pass, zero errors and at least one written line.
 */
@Test
public void testProcessRow() throws KettleException {
  AddXML addXML = new AddXML( stepMockHelper.stepMeta, stepMockHelper.stepDataInterface, 0, stepMockHelper.transMeta, stepMockHelper.trans );
  addXML.init( stepMockHelper.initStepMetaInterface, stepMockHelper.initStepDataInterface );
  // Single-row input set; processRow() should consume it and report success.
  addXML.setInputRowSets( asList( createSourceRowSet( "ADDXML_TEST" ) ) );
  assertTrue( addXML.processRow( stepMockHelper.initStepMetaInterface, stepMockHelper.processRowsStepDataInterface ) );
  assertTrue( addXML.getErrors() == 0 );
  assertTrue( addXML.getLinesWritten() > 0 );
}
/**
 * Parses {@code expressionText} as a single select item with an alias,
 * e.g. {@code "expr AS alias"}.
 *
 * @param expressionText the select-item text to parse
 * @return the parsed {@link SelectExpression}
 * @throws IllegalArgumentException if the text is not a single select item
 *         (e.g. {@code "*"}) or if it carries no alias identifier
 */
public static SelectExpression parseSelectExpression(final String expressionText) {
  final SqlBaseParser.SelectItemContext parseCtx = GrammarParseUtil.getParseTree(
      expressionText,
      SqlBaseParser::selectItem
  );
  // Reject "*" and anything else that is not a single select expression.
  if (!(parseCtx instanceof SqlBaseParser.SelectSingleContext)) {
    throw new IllegalArgumentException("Illegal select item type in: " + expressionText);
  }
  final SqlBaseParser.SelectSingleContext selectSingleContext = (SqlBaseParser.SelectSingleContext) parseCtx;
  // The alias (identifier) is mandatory for this API.
  if (selectSingleContext.identifier() == null) {
    throw new IllegalArgumentException("Select item must have identifier in: " + expressionText);
  }
  return SelectExpression.of(
      ColumnName.of(ParserUtil.getIdentifierText(selectSingleContext.identifier())),
      new AstBuilder(TypeRegistry.EMPTY).buildExpression(selectSingleContext.expression())
  );
}
/** Verifies that the all-columns select item {@code "*"} is rejected. */
@Test
public void shouldThrowOnAllColumns() {
  // When:
  final Exception e = assertThrows(
      IllegalArgumentException.class,
      () -> parseSelectExpression("*")
  );

  // Then:
  assertThat(e.getMessage(), containsString("Illegal select item type in: *"));
}
/**
 * Records the time of the most recent log event and first propagates the
 * notification up the parent chain, if a parent is set.
 */
@Override
public void callAfterLog() {
  if ( parent != null ) {
    parent.callAfterLog();
  }
  // Stamp our own last-log time after the parent has been notified.
  this.logDate = new Date();
}
/** Verifies that callAfterLog() is forwarded exactly once to the parent. */
@Test
public void testJobCallAfterLog() {
  Trans trans = new Trans();
  LoggingObjectInterface parent = mock( LoggingObjectInterface.class );
  // Inject the mocked parent into the private field.
  setInternalState( trans, "parent", parent );
  trans.callAfterLog();
  verify( parent, times( 1 ) ).callAfterLog();
}
/**
 * Performs a shallow parse of one IPFIX message: reads the message header and
 * walks the contained sets, recording template, options-template and data sets
 * without fully decoding their records.
 *
 * @param packet buffer positioned at the start of a complete IPFIX message
 * @return a description of the header and all sets found in the message
 * @throws IllegalArgumentException if the buffer does not hold the whole message
 * @throws IpfixException if a set carries a reserved/invalid set id (0 or 1)
 */
public MessageDescription shallowParseMessage(ByteBuf packet) {
    final ByteBuf buffer = packet.readSlice(MessageHeader.LENGTH);
    LOG.debug("Shallow parse header\n{}", ByteBufUtil.prettyHexDump(buffer));

    final MessageHeader header = parseMessageHeader(buffer);
    final MessageDescription messageDescription = new MessageDescription(header);

    // sanity check: we need the complete packet in the buffer
    // (readSlice above already consumed the header bytes, hence the re-addition).
    if (header.length() != packet.readableBytes() + MessageHeader.LENGTH) {
        throw new IllegalArgumentException("Buffer does not contain the complete IPFIX message");
    }

    // loop over all the contained sets in the message
    while (packet.isReadable()) {
        final int setId = packet.readUnsignedShort();
        final int setLength = packet.readUnsignedShort();
        // the buffer limited to the declared length of the set.
        // The 4-byte set header (id + length) counts toward setLength.
        final ByteBuf setContent = packet.readSlice(setLength - 4);

        switch (setId) {
            case 0:
            case 1:
                // Set ids 0 and 1 are reserved in IPFIX.
                throw new IpfixException("Invalid set id in IPFIX message: " + setId);
            case 2:
                final ShallowTemplateSet templateSet = shallowParseTemplateSet(setContent);
                messageDescription.addTemplateSet(templateSet);
                break;
            case 3:
                final ShallowOptionsTemplateSet optionsTemplateSet = shallowParseOptionsTemplateSet(setContent);
                messageDescription.addOptionsTemplateSet(optionsTemplateSet);
                break;
            default:
                // Any other set id refers to a template and holds data records.
                final ShallowDataSet dataSet = shallowParseDataSet(setId, setLength, setContent, header.exportTime());
                messageDescription.addDataSet(dataSet);
                break;
        }
    }

    return messageDescription;
}
/** Shallow-parses a captured packet and checks the recorded template ids. */
@Test
public void shallowParsePacket() throws IOException {
    final ByteBuf packet = Utils.readPacket("templates-data.ipfix");

    final IpfixParser.MessageDescription description = new IpfixParser(definitions).shallowParseMessage(packet);
    assertThat(description).isNotNull();
    assertThat(description.referencedTemplateIds()).contains(256);
    assertThat(description.declaredTemplateIds()).contains(256, 257);
    assertThat(description.declaredOptionsTemplateIds()).contains(258);
}
/**
 * Updates an article category after verifying that it exists.
 *
 * @param updateReqVO request carrying the category id and the new values
 */
@Override
public void updateArticleCategory(ArticleCategoryUpdateReqVO updateReqVO) {
    // Validate that the category exists (throws otherwise).
    validateArticleCategoryExists(updateReqVO.getId());
    // Convert the request VO to a DO and persist the update.
    ArticleCategoryDO updateObj = ArticleCategoryConvert.INSTANCE.convert(updateReqVO);
    articleCategoryMapper.updateById(updateObj);
}
/** Updates an existing category and verifies the persisted values match. */
@Test
public void testUpdateArticleCategory_success() {
    // mock data: insert an existing row first (via the in-memory DB / @Sql)
    ArticleCategoryDO dbArticleCategory = randomPojo(ArticleCategoryDO.class);
    articleCategoryMapper.insert(dbArticleCategory);// insert an existing record first
    // prepare the request, targeting the inserted row's id
    ArticleCategoryUpdateReqVO reqVO = randomPojo(ArticleCategoryUpdateReqVO.class, o -> {
        o.setId(dbArticleCategory.getId()); // set the id to update
    });
    // invoke the service under test
    articleCategoryService.updateArticleCategory(reqVO);
    // verify the update was applied correctly
    ArticleCategoryDO articleCategory = articleCategoryMapper.selectById(reqVO.getId()); // fetch the latest state
    assertPojoEquals(reqVO, articleCategory);
}
/**
 * Serves a monitoring report request: checks HTTP auth, delegates to the
 * monitoring controller, and honours the special "collector=stop" parameter
 * sent by a remote collect server taking over data collection.
 */
@Override
protected void doGet(HttpServletRequest httpRequest, HttpServletResponse httpResponse)
        throws ServletException, IOException {
    final FilterContext filterContext = (FilterContext) servletConfig.getServletContext()
            .getAttribute(FILTER_CONTEXT_KEY);
    assert filterContext != null;
    // Reject unauthorized callers early (isAllowed writes the error response).
    if (!httpAuth.isAllowed(httpRequest, httpResponse)) {
        return;
    }

    final Collector collector = filterContext.getCollector();
    final MonitoringController monitoringController = new MonitoringController(collector, null);
    monitoringController.doActionIfNeededAndReport(httpRequest, httpResponse,
            servletConfig.getServletContext());

    // Here ReportServlet behaves like MonitoringFilter#doMonitoring,
    // e.g. for a collect server plugged onto Spring Boot's management endpoint (#1121).
    if ("stop".equalsIgnoreCase(HttpParameter.COLLECTOR.getParameterFrom(httpRequest))) {
        // we were called by a collect server which will handle aggregation over
        // time, storage and charts, so we stop the timer if it is running
        // and clear the stats so the collect server only retrieves the deltas
        for (final Counter counter : collector.getCounters()) {
            counter.clear();
        }

        if (!collector.isStopped()) {
            LOG.debug(
                    "Stopping the javamelody collector in this webapp, because a collector server from "
                            + httpRequest.getRemoteAddr()
                            + " wants to collect the data itself");
            filterContext.stopCollector();
        }
    }
}
/** Exercises doGet under default, denied and allowed address patterns. */
@Test
public void testDoGet() throws ServletException, IOException {
    doGet(Collections.emptyMap(), true);
    // "256.*" matches no valid IPv4 address, so access must be denied.
    setProperty(Parameter.ALLOWED_ADDR_PATTERN, "256.*");
    try {
        doGet(Collections.emptyMap(), false);
        // ".*" allows everyone again.
        setProperty(Parameter.ALLOWED_ADDR_PATTERN, ".*");
        doGet(Collections.emptyMap(), false);
    } finally {
        // Always reset the global property so other tests are unaffected.
        setProperty(Parameter.ALLOWED_ADDR_PATTERN, null);
    }
}
/**
 * Assembles the telemetry payload for the given user, or a minimal "disabled"
 * response when telemetry is switched off globally or by the user.
 *
 * @param currentUser the user the response is built for
 * @return a map of telemetry sections, or the disabled-telemetry response
 */
public Map<String, Object> getTelemetryResponse(User currentUser) {
    TelemetryUserSettings telemetryUserSettings = getTelemetryUserSettings(currentUser);
    // Both the cluster-wide flag and the per-user setting must be enabled.
    if (isTelemetryEnabled && telemetryUserSettings.telemetryEnabled()) {
        DateTime clusterCreationDate = telemetryClusterService.getClusterCreationDate().orElse(null);
        String clusterId = telemetryClusterService.getClusterId();
        List<TelemetryLicenseStatus> licenseStatuses = enterpriseDataProvider.licenseStatus();
        return telemetryResponseFactory.createTelemetryResponse(
                getClusterInfo(clusterId, clusterCreationDate, licenseStatuses),
                getUserInfo(currentUser, clusterId),
                getPluginInfo(),
                getSearchClusterInfo(),
                licenseStatuses,
                telemetryUserSettings,
                getDataNodeInfo());
    } else {
        return telemetryResponseFactory.createTelemetryDisabledResponse(telemetryUserSettings);
    }
}
/** With telemetry enabled globally but disabled by the user, only the user settings are returned. */
@Test
void test_telemetry_is_disabled_for_user() {
    TelemetryService telemetryService = createTelemetryService(true);
    mockUserTelemetryEnabled(false);
    Map<String, Object> response = telemetryService.getTelemetryResponse(user);
    assertThat(response).containsOnlyKeys(USER_TELEMETRY_SETTINGS);
}
/**
 * Derives a bootstrap-servers string (host:port) from broker configs, using
 * the first entry of the "listeners" config when present, with an explicit
 * "port" config overriding the listener's port, and falling back to defaults.
 *
 * @param configs broker configuration map
 * @return a single "host:port" bootstrap servers string
 */
static String getBootstrapServers(Map<String, ?> configs) {
    Object port = configs.get("port");
    // String.valueOf turns a missing "listeners" entry into the literal "null".
    String listeners = String.valueOf(configs.get("listeners"));
    if (!"null".equals(listeners) && listeners.length() != 0) {
        // See https://kafka.apache.org/documentation/#listeners for possible responses. If multiple listeners are configured, this function
        // picks the first listener in the list of listeners. Hence, users of this config must adjust their order accordingly.
        String firstListener = listeners.split("\\s*,\\s*")[0];
        // e.g. "SSL://host:1234" splits into ["SSL", "//host", "1234"].
        String[] protocolHostPort = firstListener.split(":");
        // Use port of listener only if no explicit config specified for KafkaConfig.PortProp().
        String portToUse = port == null ? protocolHostPort[protocolHostPort.length - 1] : String.valueOf(port);
        // Use host of listener if one is specified.
        // protocolHostPort[1] is "//host"; a length of 2 means just "//", i.e. no host given.
        return ((protocolHostPort[1].length() == 2) ? DEFAULT_BOOTSTRAP_SERVERS_HOST : protocolHostPort[1].substring(2))
                + ":" + portToUse;
    }

    // No listeners configured: default host, with "port" overriding the default port.
    return DEFAULT_BOOTSTRAP_SERVERS_HOST + ":" + (port == null ? DEFAULT_BOOTSTRAP_SERVERS_PORT : port);
}
/**
 * Covers getBootstrapServers across: listener with host, listener without
 * host, listener plus explicit "port" override, and no config at all.
 */
@Test
public void testGetKafkaBootstrapServersConfigure() {
    // Test with a "listeners" config with a host
    Map<Object, Object> brokerConfig = buildBrokerConfigs().get(0);
    Map<String, Object> listenersMap = Collections.singletonMap(KafkaConfig.ListenersProp(), brokerConfig.get(KafkaConfig.ListenersProp()));
    String bootstrapServers = CruiseControlMetricsReporter.getBootstrapServers(listenersMap);
    // Regex accepting "host:port" (optionally bracketed IPv6 hosts).
    String urlParse = "\\[?([0-9a-zA-Z\\-%._:]*)]?:(-?[0-9]+)";
    Pattern urlParsePattern = Pattern.compile(urlParse);
    assertTrue(urlParsePattern.matcher(bootstrapServers).matches());
    assertEquals(HOST, bootstrapServers.split(":")[0]);

    // Test with a "listeners" config without a host in the first listener.
    String listeners = "SSL://:1234,PLAINTEXT://myhost:4321";
    listenersMap = Collections.singletonMap(KafkaConfig.ListenersProp(), listeners);
    bootstrapServers = CruiseControlMetricsReporter.getBootstrapServers(listenersMap);
    assertTrue(urlParsePattern.matcher(bootstrapServers).matches());
    assertEquals(DEFAULT_BOOTSTRAP_SERVERS_HOST, bootstrapServers.split(":")[0]);
    assertEquals("1234", bootstrapServers.split(":")[1]);

    // Test with "listeners" and "port" config together.
    listenersMap = new HashMap<>();
    listenersMap.put(KafkaConfig.ListenersProp(), listeners);
    listenersMap.put("port", "43");
    bootstrapServers = CruiseControlMetricsReporter.getBootstrapServers(listenersMap);
    assertTrue(urlParsePattern.matcher(bootstrapServers).matches());
    assertEquals(DEFAULT_BOOTSTRAP_SERVERS_HOST, bootstrapServers.split(":")[0]);
    // Explicit "port" must win over the listener's port.
    assertEquals("43", bootstrapServers.split(":")[1]);

    // Test with null "listeners" and "port" config.
    bootstrapServers = CruiseControlMetricsReporter.getBootstrapServers(Collections.emptyMap());
    assertTrue(urlParsePattern.matcher(bootstrapServers).matches());
    assertEquals(DEFAULT_BOOTSTRAP_SERVERS_HOST, bootstrapServers.split(":")[0]);
    assertEquals(DEFAULT_BOOTSTRAP_SERVERS_PORT, bootstrapServers.split(":")[1]);
}
/**
 * Replaces {@code oldSuffix} at the end of {@code str} with {@code newSuffix}.
 *
 * @param str       the string to transform; must end with {@code oldSuffix}
 * @param oldSuffix the suffix to strip (may be empty)
 * @param newSuffix the suffix to append (may be empty)
 * @return {@code str} with its suffix swapped
 * @throws IllegalArgumentException if {@code str} does not end with {@code oldSuffix}
 */
public static String replaceSuffix(String str, String oldSuffix, String newSuffix) {
    // Guard: the input must actually carry the suffix being swapped out.
    if (!str.endsWith(oldSuffix)) {
        throw new IllegalArgumentException(
            "Expected string to end with " + oldSuffix + " but string is " + str);
    }
    final int prefixLength = str.length() - oldSuffix.length();
    return str.substring(0, prefixLength).concat(newSuffix);
}
/** Covers suffix replacement, removal, repeated suffixes and empty old suffix. */
@Test
public void testReplaceSuffix() {
    assertEquals("blah.foo.text", Utils.replaceSuffix("blah.foo.txt", ".txt", ".text"));
    assertEquals("blah.foo", Utils.replaceSuffix("blah.foo.txt", ".txt", ""));
    // Only the trailing occurrence of the suffix is removed.
    assertEquals("txt.txt", Utils.replaceSuffix("txt.txt.txt", ".txt", ""));
    // Empty old suffix degenerates to plain concatenation.
    assertEquals("foo.txt", Utils.replaceSuffix("foo", "", ".txt"));
}
/**
 * Appends a record to the replay file while keeping a consistent backup:
 * writes to the dirty copy first, atomically swaps it into place, then brings
 * the (new) dirty copy back in sync.
 *
 * @param record the command-topic record to append
 * @throws IOException if the file is not writable or any file operation fails
 */
public void write(final ConsumerRecord<byte[], byte[]> record) throws IOException {
    if (!writable) {
        throw new IOException("Write permission denied.");
    }

    final File dirty = dirty(file);
    final File tmp = tmp(file);

    // first write to the dirty copy
    appendRecordToFile(record, dirty, filesystem);

    // atomically rename the dirty copy to the "live" copy while copying the live copy to
    // the "dirty" copy via a temporary hard link
    Files.createLink(tmp.toPath(), file.toPath());
    Files.move(
        dirty.toPath(),
        file.toPath(),
        StandardCopyOption.REPLACE_EXISTING,
        StandardCopyOption.ATOMIC_MOVE
    );
    Files.move(tmp.toPath(), dirty.toPath());

    // keep the dirty copy in sync with the live copy, which now has the write
    appendRecordToFile(record, dirty, filesystem);
}
/**
 * After one successful write, a failing second write must leave the internal
 * replay file intact with exactly the first command.
 */
@Test
public void shouldPreserveBackupOnWriteFailure() throws IOException {
    // Given
    final ConsumerRecord<byte[], byte[]> record = newStreamRecord("stream1");
    replayFile.write(record);
    // Subsequent stream opens fail, causing the next write to throw.
    when(filesystem.outputStream(any(), anyBoolean()))
        .thenAnswer(BackupReplayFileTest::mockOutputStream);

    // When/Then:
    try {
        replayFile.write(record);
        Assert.fail("should throw IO exception");
    } catch (final IOException e) {
        // expected: the injected stream failure surfaces here
    }

    // Then
    final List<String> commands = Files.readAllLines(internalReplayFile.toPath());
    assertThat(commands.size(), is(1));
    assertThat(commands.get(0), is(
        "\"stream/stream1/create\"" + KEY_VALUE_SEPARATOR
            + "{\"statement\":\"CREATE STREAM stream1 (id INT) WITH (kafka_topic='stream1')\""
            + ",\"streamsProperties\":{},\"originalProperties\":{},\"plan\":null}"
    ));
}
/**
 * Returns a new {@link Builder} for assembling a custom configuration.
 *
 * @return a fresh Builder instance
 */
public static Builder custom() {
    return new Builder();
}
/** A failure-rate threshold above 100% must be rejected by the builder. */
@Test(expected = IllegalArgumentException.class)
public void failureRateThresholdAboveHundredShouldFail() {
    custom().failureRateThreshold(101).build();
}
/**
 * Merges a new lambda-variable type mapping into an old one.
 *
 * @param newMapping the mapping to layer on top
 * @param oldMapping the existing mapping
 * @return a new map containing both mappings
 * @throws IllegalStateException if a variable is mapped to conflicting types
 */
public static Map<String, SqlType> resolveOldAndNewLambdaMapping(
    final Map<String, SqlType> newMapping,
    final Map<String, SqlType> oldMapping
) {
  // Start from the old mapping and layer the new entries on top.
  final HashMap<String, SqlType> merged = new HashMap<>(oldMapping);
  newMapping.forEach((variable, type) -> {
    // A variable may only be re-mapped to the identical type.
    if (oldMapping.containsKey(variable) && !oldMapping.get(variable).equals(type)) {
      throw new IllegalStateException(String.format(
          "Could not map type %s to lambda variable %s, "
              + "%s was already mapped to %s",
          type.toString(), variable, variable, oldMapping.get(variable).toString()));
    }
    merged.put(variable, type);
  });
  return merged;
}
/** Conflicting type for an already-mapped lambda variable must throw. */
@Test
public void shouldThrowOnMappingMismatch() {
    // Given
    final Map<String, SqlType> oldMapping = ImmutableMap.of("x", SqlTypes.BIGINT, "y", SqlTypes.STRING);
    // "x" conflicts (STRING vs BIGINT); "y" agrees.
    final Map<String, SqlType> newMapping = ImmutableMap.of("x", SqlTypes.STRING, "y", SqlTypes.STRING);

    // When
    final Exception e = assertThrows(IllegalStateException.class,
        () -> LambdaMappingUtil.resolveOldAndNewLambdaMapping(newMapping, oldMapping)
    );

    // Then
    assertThat(e.getMessage(), is("Could not map type STRING to lambda variable x, x was already mapped to BIGINT"));
}
/**
 * Records the file this analysis error relates to. May only be called once.
 *
 * @param inputFile the affected file; must not be null
 * @return this, for chaining
 */
@Override
public NewAnalysisError onFile(InputFile inputFile) {
    checkArgument(inputFile != null, "Cannot use a inputFile that is null");
    // Guard against double assignment of the target file.
    checkState(this.inputFile == null, "onFile() already called");
    this.inputFile = inputFile;
    return this;
}
/** Saving an analysis error without configured storage must fail with NPE. */
@Test
public void test_no_storage() {
    DefaultAnalysisError analysisError = new DefaultAnalysisError();
    assertThatThrownBy(() -> analysisError.onFile(inputFile).save())
        .isInstanceOf(NullPointerException.class);
}
/**
 * Returns true if the two topic names could collide.
 * NOTE(review): relies on {@code unifyCollisionChars} normalizing the
 * interchangeable characters (presumably '.' and '_', which map to the same
 * metric name) — confirm against that helper's implementation.
 */
public static boolean hasCollision(String topicA, String topicB) {
    return unifyCollisionChars(topicA).equals(unifyCollisionChars(topicB));
}
/**
 * '.' and '_' collide only when they occupy the same position; every topic
 * trivially collides with itself.
 */
@Test
public void testTopicHasCollision() {
    List<String> periodFirstMiddleLastNone = Arrays.asList(".topic", "to.pic", "topic.", "topic");
    List<String> underscoreFirstMiddleLastNone = Arrays.asList("_topic", "to_pic", "topic_", "topic");

    // Self
    for (String topic : periodFirstMiddleLastNone)
        assertTrue(Topic.hasCollision(topic, topic));
    for (String topic : underscoreFirstMiddleLastNone)
        assertTrue(Topic.hasCollision(topic, topic));

    // Same Position
    for (int i = 0; i < periodFirstMiddleLastNone.size(); ++i)
        assertTrue(Topic.hasCollision(periodFirstMiddleLastNone.get(i), underscoreFirstMiddleLastNone.get(i)));

    // Different Position
    Collections.reverse(underscoreFirstMiddleLastNone);
    for (int i = 0; i < periodFirstMiddleLastNone.size(); ++i)
        assertFalse(Topic.hasCollision(periodFirstMiddleLastNone.get(i), underscoreFirstMiddleLastNone.get(i)));
}
/**
 * Scans for resources under the given URI: classpath-scheme URIs are resolved
 * across the classpath, any other scheme is scanned directly.
 *
 * @param classpathResourceUri the URI to scan; must not be null
 * @return all resources found under the URI
 */
public List<R> scanForResourcesUri(URI classpathResourceUri) {
    requireNonNull(classpathResourceUri, "classpathResourceUri must not be null");
    if (CLASSPATH_SCHEME.equals(classpathResourceUri.getScheme())) {
        return scanForClasspathResource(resourceName(classpathResourceUri), NULL_FILTER);
    }
    // Non-classpath URIs (file, jar, ...) are handled by the generic URI path.
    return findResourcesForUri(classpathResourceUri, DEFAULT_PACKAGE_NAME, NULL_FILTER, createUriResource());
}
/** A jar: URI missing the mandatory "!/" separator must be rejected with a clear message. */
@Test
void scanForResourcesJarUriMalformed() {
    URI jarFileUri = new File("src/test/resources/io/cucumber/core/resource/test/jar-resource.jar").toURI();
    // Deliberately omit the "!/" entry separator required in jar URIs.
    URI resourceUri = URI
        .create("jar:file://" + jarFileUri.getSchemeSpecificPart() + "/com/example/package-jar-resource.txt");
    IllegalArgumentException exception = assertThrows(
        IllegalArgumentException.class,
        () -> resourceScanner.scanForResourcesUri(resourceUri));
    assertThat(exception.getMessage(), containsString("jar uri '" + resourceUri + "' must contain '!/'"));
}
@PublicAPI(usage = ACCESS) public static String ensureSimpleName(String name) { // Excluding the '$' character might be incorrect, but since '$' is a valid character of a class name // and also the delimiter within the fully qualified name between an inner class and its enclosing class, // there is no clean way to derive the simple name from just a fully qualified class name without // further information // Luckily for imported classes we can read this information from the bytecode int lastIndexOfDot = name.lastIndexOf('.'); String partAfterDot = lastIndexOfDot >= 0 ? name.substring(lastIndexOfDot + 1) : name; int lastIndexOf$ = partAfterDot.lastIndexOf('$'); String simpleNameCandidate = lastIndexOf$ >= 0 ? partAfterDot.substring(lastIndexOf$ + 1) : partAfterDot; for (int i = 0; i < simpleNameCandidate.length(); i++) { if (Character.isJavaIdentifierStart(simpleNameCandidate.charAt(i))) { return simpleNameCandidate.substring(i); } } return ""; }
/** Data-driven check of Formatters.ensureSimpleName against expected simple names. */
@Test
@UseDataProvider("simple_name_test_cases")
public void ensureSimpleName(String input, String expected) {
    assertThat(Formatters.ensureSimpleName(input)).isEqualTo(expected);
}
/**
 * Finds the top-k bigram collocations of the corpus ranked by likelihood-ratio
 * score, considering only bigrams occurring more than {@code minFrequency} times.
 *
 * @param corpus the text corpus to scan
 * @param k the maximum number of collocations to return
 * @param minFrequency minimum (exclusive) bigram count to be considered
 * @return up to k bigrams in descending score order (fewer if the corpus
 *         yields fewer qualifying bigrams)
 */
public static Bigram[] of(Corpus corpus, int k, int minFrequency) {
    // Fixed-size selection heap holding the k best bigrams seen so far.
    // NOTE(review): scores are negated on insertion, which presumes HeapSelect
    // keeps the k smallest elements by natural order — confirm against
    // smile.sort.HeapSelect.
    Bigram[] bigrams = new Bigram[k];
    HeapSelect<Bigram> heap = new HeapSelect<>(bigrams);

    Iterator<smile.nlp.Bigram> iterator = corpus.bigrams();
    while (iterator.hasNext()) {
        smile.nlp.Bigram bigram = iterator.next();
        int c12 = corpus.count(bigram);
        if (c12 > minFrequency) {
            int c1 = corpus.count(bigram.w1);
            int c2 = corpus.count(bigram.w2);
            double score = likelihoodRatio(c1, c2, c12, corpus.size());
            heap.add(new Bigram(bigram.w1, bigram.w2, c12, -score));
        }
    }

    heap.sort();
    // Read the sorted heap array backwards to obtain descending-score order,
    // restoring the original (positive) score sign on the way out.
    Bigram[] collocations = new Bigram[k];
    int n = 0;
    for (int i = 0; i < k; i++) {
        Bigram bigram = bigrams[k-i-1];
        if (bigram != null) {
            collocations[n++] = new Bigram(bigram.w1, bigram.w2, bigram.count, -bigram.score);
        }
    }

    // Fewer than k qualifying bigrams: trim the trailing null slots.
    if (n < k) {
        collocations = Arrays.copyOf(collocations, n);
    }

    return collocations;
}
/** Top-10 collocations: checks length, and count/score of the first and last entries. */
@Test
public void testFind_Corpus_int() {
    System.out.println("k = 10");
    int k = 10;
    Bigram[] result = Bigram.of(corpus, k, 5);
    assertEquals(10, result.length);
    for (Bigram bigram : result) {
        System.out.println(bigram);
    }
    // Highest-scoring collocation.
    assertEquals(46, result[0].count);
    assertEquals(545.16, result[0].score, 1E-2);
    // Lowest-scoring of the returned top 10.
    assertEquals(19, result[9].count);
    assertEquals(186.69, result[9].score, 1E-2);
}
/**
 * CDI shutdown hook: stops the background job server and dashboard when they
 * are enabled, then always closes the storage provider.
 */
void shutdown(@Observes ShutdownEvent event) {
    if (jobRunrBuildTimeConfiguration.backgroundJobServer().enabled()) {
        backgroundJobServerInstance.get().stop();
    }
    if (jobRunrBuildTimeConfiguration.dashboard().enabled()) {
        dashboardWebServerInstance.get().stop();
    }
    // The storage provider is closed unconditionally.
    storageProviderInstance.get().close();
}
/** shutdown() must close the storage provider. */
@Test
void jobRunrStarterStopsStorageProvider() {
    jobRunrStarter.shutdown(new ShutdownEvent());

    verify(storageProvider).close();
}
/**
 * Uploads a file in multiple parts directly to S3 through SDS presigned URLs,
 * optionally TripleCrypt-encrypting the content, then completes the upload and
 * polls until the node is available.
 *
 * @param file     remote target path
 * @param local    local source file
 * @param throttle bandwidth throttle applied to each part
 * @param listener stream progress listener
 * @param status   transfer status (length, offset, timestamps, file key)
 * @param callback connection prompt callback
 * @return the created node after the server-side upload completes
 * @throws BackgroundException on crypto, API or I/O failures (mapped)
 */
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                   final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        // Read from an encrypting stream when the target container is TripleCrypt encrypted.
        final InputStream in;
        if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
            in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
        }
        else {
            in = local.getInputStream();
        }
        // Open a direct-S3 upload channel for the target file.
        final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
            .directS3Upload(true)
            .timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
            .timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
            .size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
            .parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
            .name(file.getName());
        final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
            .createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
        if(log.isDebugEnabled()) {
            log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
        }
        final Map<Integer, TransferStatus> etags = new HashMap<>();
        final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
        final List<Future<TransferStatus>> parts = new ArrayList<>();
        try {
            final String random = new UUIDRandomStringService().random();
            // Full size of file
            final long size = status.getLength() + status.getOffset();
            long offset = 0;
            long remaining = status.getLength();
            // Submit one task per part; part size scales with the total size,
            // bounded below by the configured partsize.
            for(int partNumber = 1; remaining >= 0; partNumber++) {
                final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
                if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
                    // Encrypted parts are first buffered to a temporary file,
                    // since the encrypting stream cannot be re-read per part.
                    final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
                    }
                    final FileBuffer buffer = new FileBuffer(temporary);
                    new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
                        .transfer(in, new BufferOutputStream(buffer));
                    parts.add(this.submit(pool, file, temporary, buffer, throttle, listener, status,
                        presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
                }
                else {
                    parts.add(this.submit(pool, file, local, Buffer.noop, throttle, listener, status,
                        presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
                }
                remaining -= length;
                offset += length;
                if(0L == remaining) {
                    break;
                }
            }
        }
        finally {
            in.close();
        }
        // Await all part uploads and collect their results keyed by part number.
        Interruptibles.awaitAll(parts)
            .forEach(part -> etags.put(part.getPart(), part));
        final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
            .keepShareLinks(new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep"))
            .resolutionStrategy(CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE);
        if(status.getFilekey() != null) {
            // TripleCrypt: re-encrypt the plain file key with the user's public key.
            final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
            final FileKey fileKey = reader.readValue(status.getFilekey().array());
            final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
                TripleCryptConverter.toCryptoPlainFileKey(fileKey),
                TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer())
            );
            completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
        }
        etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
            new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
        if(log.isDebugEnabled()) {
            log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
        }
        new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
        // Polling
        return new SDSUploadService(session, nodeid).await(file, status, createFileUploadResponse.getUploadId()).getNode();
    }
    catch(CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
        throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
    finally {
        temp.shutdown();
        // Cancel future tasks
        pool.shutdown(false);
    }
}
/**
 * Round-trips a 10 MiB file through the direct-S3 multipart upload and checks
 * the resulting node attributes, then cleans up the room and the local file.
 */
@Test
public void testUploadExactMultipartSize() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final SDSDirectS3UploadFeature feature = new SDSDirectS3UploadFeature(session, nodeid, new SDSDelegatingWriteFeature(session, nodeid, new SDSDirectS3WriteFeature(session, nodeid)));
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    // Prepare a 10 MiB local source file with random content.
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final byte[] random = RandomUtils.nextBytes(10 * 1024 * 1024);
    final OutputStream out = local.getOutputStream(false);
    IOUtils.write(random, out);
    out.close();
    final TransferStatus status = new TransferStatus();
    status.setLength(random.length);
    final Node node = feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
        new DisabledStreamListener(), status, new DisabledLoginCallback());
    assertTrue(status.isComplete());
    assertNotSame(PathAttributes.EMPTY, status.getResponse());
    assertTrue(new SDSFindFeature(session, nodeid).find(test));
    // Remote attributes must reflect the uploaded size and node metadata.
    final PathAttributes attributes = new SDSAttributesFinderFeature(session, nodeid).find(test);
    assertEquals(random.length, attributes.getSize());
    assertEquals(new SDSAttributesAdapter(session).toAttributes(node), attributes);
    // Cleanup: delete the room remotely and the temporary file locally.
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
    local.delete();
}
/**
 * Converts a list of properties into an immutable name-to-expression map.
 *
 * @param properties the properties to convert
 * @return an immutable map of property name to value expression
 * @throws PrestoException with SYNTAX_ERROR if a property name appears twice
 */
public static Map<String, Expression> mapFromProperties(List<Property> properties) {
    Map<String, Expression> outputMap = new HashMap<>();
    for (Property property : properties) {
        String key = property.getName().getValue();
        // Duplicate property names are a user error, reported with both values.
        if (outputMap.containsKey(key)) {
            throw new PrestoException(SYNTAX_ERROR, format("Duplicate property found: %s=%s and %s=%s",
                    key, outputMap.get(key), key, property.getValue()));
        }
        outputMap.put(key, property.getValue());
    }
    return ImmutableMap.copyOf(outputMap);
}
/** Two distinct properties map to their respective expressions. */
@Test
public void testMapFromProperties() {
    List<Property> propertyList = new ArrayList<>();
    propertyList.add(new Property(new Identifier("a"), new StringLiteral("apple")));
    propertyList.add(new Property(new Identifier("b"), new StringLiteral("ball")));
    Map<String, Expression> stringExpressionMap = NodeUtils.mapFromProperties(propertyList);
    assertEquals(2, stringExpressionMap.size());
    assertEquals(new StringLiteral("apple"), stringExpressionMap.get("a"));
    assertEquals(new StringLiteral("ball"), stringExpressionMap.get("b"));
}
@Override protected String getDestination(Exchange exchange, Endpoint endpoint) { // when using toD for dynamic destination then extract from header String destination = exchange.getMessage().getHeader("CamelJmsDestinationName", String.class); if (destination == null) { destination = super.getDestination(exchange, endpoint); } return destination; }
/** Without the dynamic-destination header, the name comes from the endpoint URI. */
@Test
public void testGetDestination() {
    Exchange exchange = Mockito.mock(Exchange.class);
    Message message = Mockito.mock(Message.class);
    Endpoint endpoint = Mockito.mock(Endpoint.class);
    Mockito.when(exchange.getIn()).thenReturn(message);
    Mockito.when(exchange.getMessage()).thenReturn(message);
    // Header is unmocked (null), so the URI path "jms:cheese?..." is used.
    Mockito.when(endpoint.getEndpointUri()).thenReturn("jms:cheese?clientId=123");
    AbstractMessagingSpanDecorator decorator = new JmsSpanDecorator();
    assertEquals("cheese", decorator.getDestination(exchange, endpoint));
}
/**
 * Desensitizes the values of matching keys in a JSON body.
 *
 * @param desensitized whether desensitization is enabled
 * @param source the JSON body text
 * @param keyWordMatch matcher deciding which keys are sensitive
 * @param dataDesensitizeAlg algorithm name used to mask matched values
 * @return the masked JSON, or {@code source} unchanged when disabled/empty
 */
public static String desensitizeBody(final boolean desensitized, final String source,
                                     final KeyWordMatch keyWordMatch, final String dataDesensitizeAlg) {
    if (StringUtils.hasLength(source) && desensitized) {
        Map<String, String> bodyMap = JsonUtils.jsonToMap(source, String.class);
        // put() inside forEach is safe here: it only replaces values of existing
        // keys, so the map is not structurally modified during iteration.
        bodyMap.forEach((key, value) -> {
            if (keyWordMatch.matches(key)) {
                bodyMap.put(key, DataDesensitizeFactory.selectDesensitize(value, dataDesensitizeAlg));
            }
        });
        return JsonUtils.toJson(bodyMap);
    } else {
        return source;
    }
}
/** Disabled flag leaves the body untouched; enabled flag MD5-masks matched keys. */
@Test
public void desensitizeBodyTest() {
    String noDesensitizedData = DataDesensitizeUtils.desensitizeBody(false, JSON_TEXT, keyWordMatch,
        DataDesensitizeEnum.MD5_ENCRYPT.getDataDesensitizeAlg());
    Assertions.assertEquals(JSON_TEXT, noDesensitizedData);
    String desensitizedData = DataDesensitizeUtils.desensitizeBody(true, JSON_TEXT, keyWordMatch,
        DataDesensitizeEnum.MD5_ENCRYPT.getDataDesensitizeAlg());
    // Build the expected result by masking the "name" value the same way.
    Map<String, String> jsonMap = JsonUtils.jsonToMap(JSON_TEXT, String.class);
    jsonMap.put("name", DigestUtils.md5Hex(jsonMap.get("name")));
    String jsonRet = JsonUtils.toJson(jsonMap);
    Assertions.assertEquals(jsonRet, desensitizedData);
}
/**
 * Builds a PrometheusMetric from prometheus-client metadata and a data point
 * snapshot, supporting counter and gauge snapshots only.
 *
 * @param meta the metric metadata (name, help, optional unit)
 * @param snapshot the data point snapshot to convert
 * @return the converted metric with type, unit, labels and value set
 * @throws RuntimeException for snapshot types other than counter/gauge
 */
public static PrometheusMetric create(MetricMetadata meta, DataPointSnapshot snapshot) {
    MetricType type;
    double value = 0;
    // detect type and value from the snapshot class type
    if (snapshot instanceof CounterSnapshot.CounterDataPointSnapshot) {
        type = MetricType.COUNTER;
        value = ((CounterSnapshot.CounterDataPointSnapshot) snapshot).getValue();
    } else if (snapshot instanceof GaugeSnapshot.GaugeDataPointSnapshot) {
        type = MetricType.GAUGE;
        value = ((GaugeSnapshot.GaugeDataPointSnapshot) snapshot).getValue();
    } else {
        throw new RuntimeException(
                String.format("unknown create metric from prometheus type: %s", snapshot.getClass().getName()));
    }
    // Map the declared unit, tolerating unknown units as NOUNIT.
    MetricUnit unit = MetricUnit.NOUNIT;
    if (meta.hasUnit()) {
        unit = UNIT_MAPPING.get(meta.getUnit());
        if (unit == null) {
            unit = MetricUnit.NOUNIT;
        }
    }
    PrometheusMetric metric = new PrometheusMetric(meta.getName(), type, unit, meta.getHelp());
    for (Label label : snapshot.getLabels()) {
        metric.addLabel(new MetricLabel(label.getName(), label.getValue()));
    }
    metric.setValue(value);
    return metric;
}
// Counter and gauge snapshots must map to COUNTER and GAUGE metric types respectively.
@Test
public void testBasic() throws Exception {
    final MetricMetadata meta = new MetricMetadata("test_prometheus_metric");
    final CounterSnapshot.CounterDataPointSnapshot counterPoint =
            CounterSnapshot.CounterDataPointSnapshot.builder().value(1.0).build();
    final GaugeSnapshot.GaugeDataPointSnapshot gaugePoint =
            GaugeSnapshot.GaugeDataPointSnapshot.builder().value(1.0).build();
    Assert.assertEquals(PrometheusMetric.create(meta, counterPoint).getType(), Metric.MetricType.COUNTER);
    Assert.assertEquals(PrometheusMetric.create(meta, gaugePoint).getType(), Metric.MetricType.GAUGE);
}
// Resolves a table by first consulting table-level metadata, falling back to
// the database-level metadata when the table name alone does not resolve.
@Override
public Table getTable(String dbName, String tblName) {
    ConnectorMetadata metadata = metadataOfTable(tblName);
    if (metadata != null) {
        return metadata.getTable(dbName, tblName);
    }
    return metadataOfDb(dbName).getTable(dbName, tblName);
}
// Verifies lookup delegation: the catalog metadata queries the connector once
// and passes a null result through; information_schema resolves regardless.
@Test
void testGetTable(@Mocked ConnectorMetadata connectorMetadata) {
    // JMockit expectation: the connector is queried exactly once and returns null.
    new Expectations() {
        {
            connectorMetadata.getTable("test_db", "test_tbl");
            result = null;
            times = 1;
        }
    };
    CatalogConnectorMetadata catalogConnectorMetadata = new CatalogConnectorMetadata(
            connectorMetadata,
            informationSchemaMetadata,
            metaMetadata
    );
    Table table = catalogConnectorMetadata.getTable("test_db", "test_tbl");
    assertNull(table);
    // Built-in information_schema tables resolve without touching the connector.
    assertNotNull(catalogConnectorMetadata.getTable(InfoSchemaDb.DATABASE_NAME, "tables"));
}
// Returns the wrapped Configuration object directly (not a copy).
// NOTE(review): confMutated is set because the caller can mutate the returned
// conf — presumably consumed later by serialization logic; confirm downstream use.
public Configuration get() {
    // get() call returns the original conf, which is mutable by caller
    confMutated = true;
    return conf;
}
@Test
public void testConstruction() {
    // Default instance and its wrapped conf must be non-null.
    assertNotNull(DEFAULT_SERIALIZABLE_CONF);
    assertNotNull(DEFAULT_SERIALIZABLE_CONF.get());
    // A null conf is rejected at construction time; the expectation must be
    // registered before the constructor call.
    thrown.expect(NullPointerException.class);
    new SerializableConfiguration(null);
}
/**
 * Reads a system property as a boolean.
 *
 * Accepts "true"/"yes"/"1" as true and "false"/"no"/"0" as false
 * (case-insensitive, surrounding whitespace ignored). Any other value
 * logs a warning and yields the default.
 *
 * @param key the property key to look up
 * @param def the value returned when the property is absent, empty or unparseable
 */
public static boolean getBoolean(String key, boolean def) {
    String value = get(key);
    if (value == null) {
        return def;
    }
    value = value.trim().toLowerCase();
    if (value.isEmpty()) {
        return def;
    }
    switch (value) {
        case "true":
        case "yes":
        case "1":
            return true;
        case "false":
        case "no":
        case "0":
            return false;
        default:
            logger.warn(
                "Unable to parse the boolean system property '{}':{} - using the default value: {}",
                key, value, def
            );
            return def;
    }
}
// "true", "yes" and "1" must all parse as true, regardless of the supplied default.
@Test
public void testGetBooleanWithTrueValue() {
    for (final String truthy : new String[] {"true", "yes"}) {
        System.setProperty("key", truthy);
        assertTrue(SystemPropertyUtil.getBoolean("key", false));
    }
    System.setProperty("key", "1");
    assertTrue(SystemPropertyUtil.getBoolean("key", true));
}
/**
 * Deserializes a JSON payload into a Row.
 *
 * @return the converted row, or null when parsing fails and ignoreParseErrors is set
 * @throws IOException when parsing fails and parse errors are not ignored
 */
@Override
public Row deserialize(byte[] message) throws IOException {
    try {
        final JsonNode root = objectMapper.readTree(message);
        return (Row) runtimeConverter.convert(objectMapper, root);
    } catch (Throwable t) {
        // Swallow only when the schema was built with ignoreParseErrors.
        if (!ignoreParseErrors) {
            throw new IOException(
                    format("Failed to deserialize JSON '%s'.", new String(message)), t);
        }
        return null;
    }
}
// Verifies behavior when a declared field is absent from the JSON payload:
// lenient default, failOnMissingField, ignoreParseErrors, and the illegal
// combination of both flags.
@Test
public void testMissingNode() throws Exception {
    // Root object carries only "id" while the row type declares "name".
    ObjectNode root = OBJECT_MAPPER.createObjectNode();
    root.put("id", 123123123);
    byte[] serializedJson = OBJECT_MAPPER.writeValueAsBytes(root);
    TypeInformation<Row> rowTypeInformation = Types.ROW_NAMED(new String[] {"name"}, Types.STRING);
    // Default: a missing field deserializes to a row with a null field.
    JsonRowDeserializationSchema deserializationSchema =
            new JsonRowDeserializationSchema.Builder(rowTypeInformation).build();
    Row row = new Row(1);
    assertThat(
            whenDeserializedWith(deserializationSchema)
                .equalsTo(row)
                .matches(serializedJson))
        .isTrue();
    // failOnMissingField: the missing field triggers a deserialization error.
    deserializationSchema =
            new JsonRowDeserializationSchema.Builder(rowTypeInformation)
                .failOnMissingField()
                .build();
    final JsonRowDeserializationSchema errorDs = deserializationSchema;
    assertThatThrownBy(() -> errorDs.deserialize(serializedJson))
        .isInstanceOf(Exception.class)
        .hasMessageContaining("Failed to deserialize JSON");
    // ignore-parse-errors ignores missing field exception too
    deserializationSchema =
            new JsonRowDeserializationSchema.Builder(rowTypeInformation)
                .ignoreParseErrors()
                .build();
    assertThat(
            whenDeserializedWith(deserializationSchema)
                .equalsTo(row)
                .matches(serializedJson))
        .isTrue();
    // The two flags are mutually exclusive at build time.
    assertThatThrownBy(
            () -> new JsonRowDeserializationSchema.Builder(rowTypeInformation)
                    .failOnMissingField()
                    .ignoreParseErrors()
                    .build())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining(
            "JSON format doesn't support failOnMissingField and ignoreParseErrors are both true");
}
// Reads as many messages as are available, delegating to the bounded overload
// with no practical cap on the message count.
public int read(final MessageHandler handler) {
    return read(handler, Integer.MAX_VALUE);
}
// Two complete records sit in the buffer; a single read() must consume both,
// then zero the consumed region before publishing the new head counter.
@Test
void shouldReadTwoMessages() {
    final int msgLength = 16;
    final int recordLength = HEADER_LENGTH + msgLength;
    final int alignedRecordLength = align(recordLength, ALIGNMENT);
    final long tail = alignedRecordLength * 2L;
    final long head = 0L;
    final int headIndex = (int)head;
    // Stub two back-to-back records starting at the head position.
    when(buffer.getLong(HEAD_COUNTER_INDEX)).thenReturn(head);
    when(buffer.getInt(typeOffset(headIndex))).thenReturn(MSG_TYPE_ID);
    when(buffer.getIntVolatile(lengthOffset(headIndex))).thenReturn(recordLength);
    when(buffer.getInt(typeOffset(headIndex + alignedRecordLength))).thenReturn(MSG_TYPE_ID);
    when(buffer.getIntVolatile(lengthOffset(headIndex + alignedRecordLength))).thenReturn(recordLength);
    final MutableInteger times = new MutableInteger();
    final MessageHandler handler = (msgTypeId, buffer, index, length) -> times.increment();
    final int messagesRead = ringBuffer.read(handler);
    assertThat(messagesRead, is(2));
    assertThat(times.get(), is(2));
    // Consumed space must be zeroed BEFORE the head counter is advanced.
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer, times(1)).setMemory(headIndex, alignedRecordLength * 2, (byte)0);
    inOrder.verify(buffer, times(1)).putLongOrdered(HEAD_COUNTER_INDEX, tail);
}
// Returns the listener array registered for the given event type,
// or null when the event type itself is null.
@SuppressWarnings("unchecked")
@Override
public <T extends EventListener<?>> T[] getListeners(final EventType eventType) {
    if (eventType == null) {
        return null;
    }
    return (T[]) this.listenerMap.get(eventType);
}
@Test
void testGetListenersWithNullType() {
    // A null event type yields null rather than throwing.
    assertNull(this.instance.getListeners(null));
}
// Evaluates the expression against the given context, delegating to the
// three-arg overload with an empty filter list.
public static Object eval(String expression, Map<String, Object> context) {
    return eval(expression, context, ListUtil.empty());
}
// Rhino engine must evaluate an arithmetic expression against bound variables.
@Test
public void rhinoTest(){
    final ExpressionEngine engine = new RhinoEngine();
    final Dict context = Dict.create()
            .set("a", 100.3)
            .set("b", 45)
            .set("c", -199.100);
    // a - (b - c) = 100.3 - (45 - (-199.1)) = -143.8
    final Object result = engine.eval("a-(b-c)", context, null);
    assertEquals(-143.8, (double) result, 0);
}
/**
 * Converts a duration in milliseconds into a human-readable label.
 * The duration is expressed in every unit (fractions kept) and the
 * message selection is delegated to getMessage.
 */
public static Result label(long durationInMillis) {
    final double seconds = durationInMillis / 1000.0;
    final double minutes = seconds / 60;
    final double hours = minutes / 60;
    final double days = hours / 24;
    final double years = days / 365;
    return getMessage(seconds, minutes, hours, days, years);
}
// A zero-millisecond duration must be labelled in seconds with no value.
// Fix: the previous input "now - System.currentTimeMillis()" depended on the
// wall clock advancing between the two calls and could yield a small negative
// number; the intent is simply a (near-)zero duration, so pass 0 directly.
@Test
public void age_in_seconds() {
    DurationLabel.Result result = DurationLabel.label(0);
    assertThat(result.key()).isEqualTo("duration.seconds");
    assertThat(result.value()).isNull();
}
// Returns this indicator multiplied by itself, i.e. x^2 at every index.
public NumericIndicator squared() {
    // TODO: implement pow(n); a few others
    return this.multipliedBy(this);
}
@Test
public void squared() {
    final NumericIndicator numericIndicator = NumericIndicator.of(cp1);
    final NumericIndicator dynamicOp = numericIndicator.squared();
    // cp1 presumably yields 1 at index 0 and 9 at index 8 (squares: 1 and 81)
    // — TODO confirm against the fixture's definition of cp1.
    assertNumEquals(1, dynamicOp.getValue(0));
    assertNumEquals(81, dynamicOp.getValue(8));
}
// UDF: hex-encoded MD5 digest of the input; SQL NULL propagates as null.
@Udf(description = "Returns the hex-encoded md5 hash of the input string")
public String md5(
    @UdfParameter(description = "The input string") final String s
) {
    return s == null ? null : DigestUtils.md5Hex(s);
}
@Test public void shouldReturnHexString() { assertThat(udf.md5("one"), is("f97c5d29941bfb1b2fdab0874906ab82")); assertThat(udf.md5("two"), is("b8a9f715dbb64fd5c56e7783c6820a61")); assertThat(udf.md5("three"), is("35d6d33467aae9a2e3dccb4b6b027878")); assertThat(udf.md5(""), is("d41d8cd98f00b204e9800998ecf8427e")); assertThat(udf.md5(" "), is("7215ee9c7d9dc229d2921a40e899ec5f")); // Sanity check some strange Unicode characters assertThat(udf.md5("★☀☺"), is("670884f26d7be1298886c2e9d7c248a4")); // Now generate random input strings and compare results to DigestUtils for (int i = 0; i < 1000; i++) { String uuid = UUID.randomUUID().toString(); assertThat(udf.md5(uuid), is(DigestUtils.md5Hex(uuid))); } }
// Casts an Integer to a decimal with the given precision/scale.
// Null input maps to null; otherwise delegates to the long overload.
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
    if (value == null) {
        return null;
    }
    return cast(value.longValue(), precision, scale);
}
// A Double input within precision/scale bounds keeps its decimal value exactly.
@Test
public void shouldCastDouble() {
    // When:
    final BigDecimal decimal = DecimalUtil.cast((Double)1.1, 2, 1);
    // Then:
    assertThat(decimal, is(new BigDecimal("1.1")));
}
// Parses a raw User-Agent header string into a structured UserAgent,
// delegating to UserAgentParser.
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
// UC Browser on iOS: verifies browser, engine, OS, platform and mobile detection.
@Test
public void parseUCTest2() {
    final String uaString = "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X; zh-CN) AppleWebKit/537.51.1 (KHTML, like Gecko) Mobile/16G102 UCBrowser/12.7.6.1251 Mobile AliApp(TUnionSDK/0.1.20.3)";
    final UserAgent ua = UserAgentUtil.parse(uaString);
    assertEquals("UCBrowser", ua.getBrowser().toString());
    assertEquals("12.7.6.1251", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("537.51.1", ua.getEngineVersion());
    assertEquals("iPhone", ua.getOs().toString());
    assertEquals("12_4_1", ua.getOsVersion());
    assertEquals("iPhone", ua.getPlatform().toString());
    assertTrue(ua.isMobile());
}
// Scheduled redo task: replays failed instance registrations and subscriptions
// once the gRPC connection is available. Skips the round when disconnected.
@Override
public void run() {
    if (!redoService.isConnected()) {
        // Without a live connection the redo would fail anyway; try next round.
        LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task");
        return;
    }
    try {
        redoForInstances();
        redoForSubscribes();
    } catch (Exception e) {
        // Never let an exception kill the periodic task.
        LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e);
    }
}
// When doRegisterService throws an unexpected runtime exception, the redo task
// must swallow it and return normally instead of propagating.
@Test
void testRunRedoRegisterInstanceWithOtherException() throws NacosException {
    Set<InstanceRedoData> mockData = generateMockInstanceData(false, false, true);
    when(redoService.findInstanceRedoData()).thenReturn(mockData);
    doThrow(new RuntimeException("test")).when(clientProxy).doRegisterService(SERVICE, GROUP, INSTANCE);
    redoTask.run();
    // Not any exception thrown
}
// Assembles the elastic-agent plugin info for a descriptor. Plugin-level
// settings are only included for plugins WITHOUT cluster-profile support.
@Override
public ElasticAgentPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    final String pluginId = descriptor.id();
    final PluggableInstanceSettings pluginSettings =
            extension.supportsClusterProfiles(pluginId)
                    ? null
                    : getPluginSettingsAndView(descriptor, extension);
    return new ElasticAgentPluginInfo(
            descriptor,
            elasticElasticAgentProfileSettings(pluginId),
            elasticClusterProfileSettings(pluginId),
            image(pluginId),
            pluginSettings,
            capabilities(pluginId));
}
// Plugins supporting cluster profiles must not have plugin-level settings
// fetched at all — neither configuration nor view endpoints may be hit.
@Test
public void shouldNotFetchPluginSettingsForPluginsSupportingClusterProfiles() {
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
    when(extension.supportsClusterProfiles("plugin1")).thenReturn(true);
    ElasticAgentPluginInfoBuilder builder = new ElasticAgentPluginInfoBuilder(extension);
    ElasticAgentPluginInfo pluginInfo = builder.pluginInfoFor(descriptor);
    assertNull(pluginInfo.getPluginSettings());
    verify(extension, never()).getPluginSettingsConfiguration(descriptor.id());
    verify(extension, never()).getPluginSettingsView(descriptor.id());
}
// UDF: parses a formatted string into a DATE value. Null inputs propagate as
// null; any parse failure is surfaced as a KsqlFunctionException.
@Udf(description = "Converts a string representation of a date in the given format"
    + " into a DATE value.")
public Date parseDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.text.SimpleDateFormat.") final String formatPattern) {
    if (formattedDate == null || formatPattern == null) {
        return null;
    }
    try {
        final long epochMillis = formatters.get(formatPattern).parse(formattedDate).getTime();
        // A DATE must be midnight-aligned; reject patterns carrying time-of-day.
        // NOTE(review): if KsqlFunctionException is a RuntimeException, this
        // throw is re-wrapped by the catch below — confirm that is intended.
        if (epochMillis % MILLIS_IN_DAY != 0) {
            throw new KsqlFunctionException("Date format contains time field.");
        }
        return new Date(epochMillis);
    } catch (final ExecutionException | RuntimeException | ParseException e) {
        throw new KsqlFunctionException("Failed to parse date '" + formattedDate
            + "' with formatter '" + formatPattern
            + "': " + e.getMessage(), e);
    }
}
// A null input date must yield null rather than raising an error.
@Test
public void shouldHandleNullDate() {
    // When:
    final Date result = udf.parseDate(null, "dd-MMM-yyyy");
    // Then:
    assertThat(result, is(nullValue()));
}
// Reads the contents of all files matching filePattern, retrying (per the
// supplied backoff) while no files match or an IOException occurs. Throws
// after the backoff is exhausted, chaining the last read failure if any.
@Override
public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff)
    throws IOException, InterruptedException {
  IOException lastException = null;
  do {
    try {
      Collection<Metadata> files = FileSystems.match(filePattern).metadata();
      LOG.debug(
          "Found file(s) {} by matching the path: {}",
          files.stream()
              .map(Metadata::resourceId)
              .map(ResourceId::getFilename)
              .collect(Collectors.joining(",")),
          filePattern);
      if (files.isEmpty()) {
        // Nothing matched yet; `continue` falls through to the backoff
        // check in the while condition and retries.
        continue;
      }
      // Read data from file paths
      return readLines(files);
    } catch (IOException e) {
      // Ignore and retry
      lastException = e;
      LOG.warn("Error in file reading. Ignore and retry.");
    }
  } while (BackOffUtils.next(sleeper, backOff));
  // Failed after max retries
  throw new IOException(
      String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES),
      lastException);
}
// Files matched by the pattern must be read regardless of shard-number naming;
// order across shards is not guaranteed.
@Test
public void testReadMultipleShardsWithoutShardNumber() throws Exception {
    String contents1 = "To be or not to be, ";
    String contents2 = "it is not a question.";
    File tmpFile1 = tmpFolder.newFile("result");
    File tmpFile2 = tmpFolder.newFile("tmp");
    Files.asCharSink(tmpFile1, StandardCharsets.UTF_8).write(contents1);
    Files.asCharSink(tmpFile2, StandardCharsets.UTF_8).write(contents2);
    FilePatternMatchingShardedFile shardedFile = new FilePatternMatchingShardedFile(filePattern);
    assertThat(shardedFile.readFilesWithRetries(), containsInAnyOrder(contents1, contents2));
}
// Zooms out one level. The boolean argument presumably enables animation —
// TODO confirm against the zoomOut(boolean) overload.
@Override
public void zoomOut() {
    zoomOut(true);
}
// Zooming out decrements the level by one and is a no-op at the minimum level.
@Test
public void zoomOutTest() {
    MapViewPosition position = new MapViewPosition(new DisplayModel());
    // From level 1, zoomOut lands on level 0.
    position.setZoomLevel((byte) 1);
    Assert.assertEquals(1, position.getZoomLevel());
    position.zoomOut();
    Assert.assertEquals(0, position.getZoomLevel());
    // At the minimum level, zoomOut does nothing.
    position.setZoomLevel((byte) 0);
    Assert.assertEquals(0, position.getZoomLevel());
    position.zoomOut();
    Assert.assertEquals(0, position.getZoomLevel());
}
/**
 * Scans the latest per-broker bundle stats and returns the bundles that should
 * be split, mapped to the broker currently serving them.
 *
 * A bundle is a split candidate when it exceeds any configured threshold
 * (topics, sessions, message rate, or bandwidth) and its namespace stays under
 * loadBalancerNamespaceMaximumBundles, counting splits already planned in this
 * pass.
 *
 * Fix: the info log previously said "considered to be unload" — copy-paste
 * from the unload path; this task decides splits, so the message now says so.
 */
@Override
public Map<String, String> findBundlesToSplit(final LoadData loadData, final PulsarService pulsar) {
    bundleCache.clear();
    namespaceBundleCount.clear();
    final ServiceConfiguration conf = pulsar.getConfiguration();
    int maxBundleCount = conf.getLoadBalancerNamespaceMaximumBundles();
    long maxBundleTopics = conf.getLoadBalancerNamespaceBundleMaxTopics();
    long maxBundleSessions = conf.getLoadBalancerNamespaceBundleMaxSessions();
    long maxBundleMsgRate = conf.getLoadBalancerNamespaceBundleMaxMsgRate();
    long maxBundleBandwidth = conf.getLoadBalancerNamespaceBundleMaxBandwidthMbytes() * LoadManagerShared.MIBI;
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        LocalBrokerData localData = brokerData.getLocalData();
        for (final Map.Entry<String, NamespaceBundleStats> entry : localData.getLastStats().entrySet()) {
            final String bundle = entry.getKey();
            final NamespaceBundleStats stats = entry.getValue();
            // A bundle with fewer than 2 topics cannot be meaningfully split.
            if (stats.topics < 2) {
                if (log.isDebugEnabled()) {
                    log.debug("The count of topics on the bundle {} is less than 2, skip split!", bundle);
                }
                continue;
            }
            double totalMessageRate = 0;
            double totalMessageThroughput = 0;
            // Attempt to consider long-term message data, otherwise effectively ignore.
            if (loadData.getBundleData().containsKey(bundle)) {
                final TimeAverageMessageData longTermData = loadData.getBundleData().get(bundle).getLongTermData();
                totalMessageRate = longTermData.totalMsgRate();
                totalMessageThroughput = longTermData.totalMsgThroughput();
            }
            if (stats.topics > maxBundleTopics || (maxBundleSessions > 0
                    && (stats.consumerCount + stats.producerCount > maxBundleSessions))
                    || totalMessageRate > maxBundleMsgRate
                    || totalMessageThroughput > maxBundleBandwidth) {
                final String namespace = LoadManagerShared.getNamespaceNameFromBundleName(bundle);
                try {
                    final int bundleCount = pulsar.getNamespaceService()
                            .getBundleCount(NamespaceName.get(namespace));
                    // Respect the namespace maximum, including splits already
                    // proposed for this namespace during the current pass.
                    if ((bundleCount + namespaceBundleCount.getOrDefault(namespace, 0)) < maxBundleCount) {
                        log.info("The bundle {} is considered to be split. Topics: {}/{}, Sessions: ({}+{})/{}, "
                                        + "Message Rate: {}/{} (msgs/s), Message Throughput: {}/{} (MB/s)",
                                bundle, stats.topics, maxBundleTopics, stats.producerCount,
                                stats.consumerCount, maxBundleSessions, totalMessageRate, maxBundleMsgRate,
                                totalMessageThroughput / LoadManagerShared.MIBI,
                                maxBundleBandwidth / LoadManagerShared.MIBI);
                        bundleCache.put(bundle, broker);
                        int bundleNum = namespaceBundleCount.getOrDefault(namespace, 0);
                        namespaceBundleCount.put(namespace, bundleNum + 1);
                    } else {
                        if (log.isDebugEnabled()) {
                            log.debug(
                                    "Could not split namespace bundle {} because namespace {} has too many bundles:"
                                            + "{}", bundle, namespace, bundleCount);
                        }
                    }
                } catch (Exception e) {
                    log.warn("Error while getting bundle count for namespace {}", namespace, e);
                }
            }
        }
    });
    return bundleCache;
}
// With loadBalancerNamespaceMaximumBundles = 3 and three overloaded bundles in
// one namespace, the splitter may only propose splits until the namespace
// reaches the configured maximum bundle count.
@Test
public void testLoadBalancerNamespaceMaximumBundles() throws Exception {
    pulsar.getConfiguration().setLoadBalancerNamespaceMaximumBundles(3);
    final BundleSplitterTask bundleSplitterTask = new BundleSplitterTask();
    LoadData loadData = new LoadData();
    LocalBrokerData brokerData = new LocalBrokerData();
    Map<String, NamespaceBundleStats> lastStats = new HashMap<>();
    // Three bundles, each with enough topics (>= 2) to be splittable.
    final NamespaceBundleStats namespaceBundleStats = new NamespaceBundleStats();
    namespaceBundleStats.topics = 5;
    lastStats.put("ten/ns/0x00000000_0x20000000", namespaceBundleStats);
    final NamespaceBundleStats namespaceBundleStats2 = new NamespaceBundleStats();
    namespaceBundleStats2.topics = 5;
    lastStats.put("ten/ns/0x20000000_0x40000000", namespaceBundleStats2);
    final NamespaceBundleStats namespaceBundleStats3 = new NamespaceBundleStats();
    namespaceBundleStats3.topics = 5;
    lastStats.put("ten/ns/0x40000000_0x60000000", namespaceBundleStats3);
    brokerData.setLastStats(lastStats);
    loadData.getBrokerData().put("broker", new BrokerData(brokerData));
    // Long-term message rates far above the limit make each bundle a candidate.
    BundleData bundleData1 = new BundleData();
    TimeAverageMessageData averageMessageData1 = new TimeAverageMessageData();
    averageMessageData1.setMsgRateIn(pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate() * 2);
    averageMessageData1.setMsgRateOut(1);
    bundleData1.setLongTermData(averageMessageData1);
    loadData.getBundleData().put("ten/ns/0x00000000_0x20000000", bundleData1);
    BundleData bundleData2 = new BundleData();
    TimeAverageMessageData averageMessageData2 = new TimeAverageMessageData();
    averageMessageData2.setMsgRateIn(pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate() * 2);
    averageMessageData2.setMsgRateOut(1);
    bundleData2.setLongTermData(averageMessageData2);
    loadData.getBundleData().put("ten/ns/0x20000000_0x40000000", bundleData2);
    BundleData bundleData3 = new BundleData();
    TimeAverageMessageData averageMessageData3 = new TimeAverageMessageData();
    averageMessageData3.setMsgRateIn(pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate() * 2);
    averageMessageData3.setMsgRateOut(1);
    bundleData3.setLongTermData(averageMessageData3);
    loadData.getBundleData().put("ten/ns/0x40000000_0x60000000", bundleData3);
    int currentBundleCount = pulsar.getNamespaceService().getBundleCount(NamespaceName.get("ten/ns"));
    final Map<String, String> bundlesToSplit = bundleSplitterTask.findBundlesToSplit(loadData, pulsar);
    // Existing bundles plus proposed splits must exactly reach the maximum.
    Assert.assertEquals(bundlesToSplit.size() + currentBundleCount,
            pulsar.getConfiguration().getLoadBalancerNamespaceMaximumBundles());
}
/**
 * Checks whether the given IP literal (IPv4 or IPv6) falls inside any
 * configured priority CIDR.
 *
 * Fix: the candidate address was previously re-parsed once per CIDR inside
 * the loop; it is loop-invariant, so it is now parsed exactly once.
 *
 * @param ip IP address literal; surrounding whitespace is ignored
 * @return true if the address is contained in at least one PRIORITY_CIDRS entry
 */
@VisibleForTesting
static boolean isInPriorNetwork(String ip) {
    final IPAddressString address = new IPAddressString(ip.trim());
    for (String cidr : PRIORITY_CIDRS) {
        IPAddressString network = new IPAddressString(cidr.trim());
        if (network.contains(address)) {
            return true;
        }
    }
    return false;
}
// CIDR matching must work for both IPv4 (/32 exact) and IPv6 (/32 prefix) entries.
@Test
public void cidrTest() {
    List<String> priorityCidrs = FrontendOptions.PRIORITY_CIDRS;
    priorityCidrs.add("192.168.5.136/32");
    priorityCidrs.add("2001:db8::/32");
    FrontendOptions frontendOptions = new FrontendOptions();
    // Loopback is not in any configured priority network.
    boolean inPriorNetwork = frontendOptions.isInPriorNetwork("127.0.0.1");
    Assert.assertEquals(false, inPriorNetwork);
    // Exact IPv4 /32 match.
    inPriorNetwork = frontendOptions.isInPriorNetwork("192.168.5.136");
    Assert.assertEquals(true, inPriorNetwork);
    // IPv6 prefix match.
    inPriorNetwork = frontendOptions.isInPriorNetwork("2001:db8::1");
    Assert.assertTrue(inPriorNetwork);
}
// Builds (or augments) a nested Map structure from path-keyed parameters.
// When the initial instance is a valid non-Map value it is returned as-is;
// otherwise each parameter's key path is materialized as nested maps.
@SuppressWarnings("unchecked")
@Override
protected Object createObject(ValueWrapper<Object> initialInstance, String className,
                              Map<List<String>, Object> params, ClassLoader classLoader) {
    // simple types
    if (initialInstance.isValid() && !(initialInstance.getValue() instanceof Map)) {
        return initialInstance.getValue();
    }
    Map<String, Object> toReturn = (Map<String, Object>) initialInstance.orElseGet(HashMap::new);
    for (Map.Entry<List<String>, Object> listObjectEntry : params.entrySet()) {
        // direct mapping already considered
        if (listObjectEntry.getKey().isEmpty()) {
            continue;
        }
        List<String> allSteps = listObjectEntry.getKey();
        // All path segments except the last select (or lazily create) nested maps.
        List<String> steps = allSteps.subList(0, allSteps.size() - 1);
        String lastStep = allSteps.get(allSteps.size() - 1);
        Map<String, Object> targetMap = toReturn;
        for (String step : steps) {
            targetMap = (Map<String, Object>) targetMap.computeIfAbsent(step, k -> new HashMap<>());
        }
        // The final segment holds the actual value.
        targetMap.put(lastStep, listObjectEntry.getValue());
    }
    return toReturn;
}
// Path-keyed parameters must fold into nested maps: two-segment paths become
// a nested map under "creator"; single-segment paths stay at the top level.
@SuppressWarnings("unchecked")
@Test
public void createObject() {
    Map<List<String>, Object> params = new HashMap<>();
    params.put(List.of("creator", "name"), "TestName");
    params.put(List.of("creator", "surname"), "TestSurname");
    params.put(List.of("age"), BigDecimal.valueOf(10));
    ValueWrapper<Object> initialInstance = runnerHelper.getDirectMapping(params);
    Object objectRaw = runnerHelper.createObject(initialInstance, String.class.getCanonicalName(), params, getClass().getClassLoader());
    assertThat(objectRaw).isInstanceOf(Map.class);
    Map<String, Object> object = (Map<String, Object>) objectRaw;
    assertThat(object).containsEntry("age", BigDecimal.valueOf(10));
    assertThat(object.get("creator")).isInstanceOf(Map.class);
    Map<String, Object> creator = (Map<String, Object>) object.get("creator");
    assertThat(creator).containsEntry("name", "TestName").containsEntry("surname", "TestSurname");
}
// CLI command that replaces hoodie.properties with a user-supplied file and
// prints an old-vs-new diff of every property key. Destructive by design.
@ShellMethod(key = "repair overwrite-hoodie-props",
    value = "Overwrite hoodie.properties with provided file. Risky operation. Proceed with caution!")
public String overwriteHoodieProperties(
    @ShellOption(value = {"--new-props-file"},
        help = "Path to a properties file on local filesystem to overwrite the table's hoodie.properties with")
        final String overwriteFilePath) throws IOException {
  HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
  Properties newProps = new Properties();
  try (FileInputStream fileInputStream = new FileInputStream(overwriteFilePath)) {
    newProps.load(fileInputStream);
  }
  Map<String, String> oldProps = client.getTableConfig().propsMap();
  // Copy Initial Version from old-props to new-props
  if (oldProps.containsKey(HoodieTableConfig.INITIAL_VERSION.key())) {
    newProps.put(HoodieTableConfig.INITIAL_VERSION.key(),
        oldProps.get(HoodieTableConfig.INITIAL_VERSION.key()));
  }
  HoodieTableConfig.create(client.getStorage(), client.getMetaPath(), newProps);
  // reload new props as checksum would have been added
  newProps = HoodieTableMetaClient.reload(HoodieCLI.getTableMetaClient()).getTableConfig().getProps();
  // Sorted union of old and new keys drives the diff table rows.
  TreeSet<String> allPropKeys = new TreeSet<>();
  allPropKeys.addAll(
      newProps.keySet().stream().map(Object::toString).collect(Collectors.toSet()));
  allPropKeys.addAll(oldProps.keySet());
  String[][] rows = new String[allPropKeys.size()][];
  int ind = 0;
  for (String propKey : allPropKeys) {
    String[] row = new String[] {
        propKey,
        oldProps.getOrDefault(propKey, "null"),
        newProps.getOrDefault(propKey, "null").toString()
    };
    rows[ind++] = row;
  }
  return HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_HOODIE_PROPERTY,
      HoodieTableHeaderFields.HEADER_OLD_VALUE, HoodieTableHeaderFields.HEADER_NEW_VALUE}, rows);
}
// Overwriting hoodie.properties must persist the new file's entries (plus the
// regenerated checksum and defaults) and render the expected diff table.
@Test
public void testOverwriteHoodieProperties() throws IOException {
    URL newProps = this.getClass().getClassLoader().getResource("table-config.properties");
    assertNotNull(newProps, "New property file must exist");
    Object cmdResult = shell.evaluate(() -> "repair overwrite-hoodie-props --new-props-file " + newProps.getPath());
    assertTrue(ShellEvaluationResultUtil.isSuccess(cmdResult));
    Map<String, String> oldProps = HoodieCLI.getTableMetaClient().getTableConfig().propsMap();
    // after overwrite, the stored value in .hoodie is equals to which read from properties.
    HoodieTableConfig tableConfig = HoodieTableMetaClient.reload(HoodieCLI.getTableMetaClient()).getTableConfig();
    Map<String, String> result = tableConfig.propsMap();
    // validate table checksum
    assertTrue(result.containsKey(TABLE_CHECKSUM.key()));
    assertTrue(validateChecksum(tableConfig.getProps()));
    // The stored config equals the file contents plus checksum and defaults.
    Properties expectProps = new Properties();
    expectProps.load(new FileInputStream(newProps.getPath()));
    Map<String, String> expected = expectProps.entrySet().stream()
        .collect(Collectors.toMap(e -> String.valueOf(e.getKey()), e -> String.valueOf(e.getValue())));
    expected.putIfAbsent(TABLE_CHECKSUM.key(), String.valueOf(generateChecksum(tableConfig.getProps())));
    expected.putIfAbsent(DROP_PARTITION_COLUMNS.key(), String.valueOf(DROP_PARTITION_COLUMNS.defaultValue()));
    assertEquals(expected, result);
    // check result: rebuild the expected diff table and compare its rendering.
    List<String> allPropsStr = Arrays.asList(NAME.key(), TYPE.key(), VERSION.key(),
        ARCHIVELOG_FOLDER.key(), TIMELINE_LAYOUT_VERSION.key(), TABLE_CHECKSUM.key(),
        DROP_PARTITION_COLUMNS.key());
    String[][] rows = allPropsStr.stream().sorted().map(key -> new String[] {key,
            oldProps.getOrDefault(key, "null"), result.getOrDefault(key, "null")})
        .toArray(String[][]::new);
    String expect = HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_HOODIE_PROPERTY,
        HoodieTableHeaderFields.HEADER_OLD_VALUE, HoodieTableHeaderFields.HEADER_NEW_VALUE}, rows);
    // Normalize whitespace/punctuation before comparing rendered tables.
    expect = removeNonWordAndStripSpace(expect);
    String got = removeNonWordAndStripSpace(cmdResult.toString());
    assertEquals(expect, got);
}
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
// Given a varargs candidate and an exact-arity candidate registered in reverse
// insertion order, the exact-arity function must win for a full argument list.
@Test
public void shouldChooseSpecificOverVarArgsAtEndReversedInsertionOrder() {
    // Given:
    givenFunctions(
        function(OTHER, 2, INT, INT, STRING_VARARGS),
        function(EXPECTED, -1, INT, INT, STRING, STRING, STRING, STRING)
    );
    // When:
    final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(
        SqlArgument.of(SqlTypes.INTEGER),
        SqlArgument.of(SqlTypes.INTEGER),
        SqlArgument.of(SqlTypes.STRING),
        SqlArgument.of(SqlTypes.STRING),
        SqlArgument.of(SqlTypes.STRING),
        SqlArgument.of(SqlTypes.STRING)
    ));
    // Then:
    assertThat(fun.name(), equalTo(EXPECTED));
}
// Fetches session-window values for a key, bypassing the cache layer:
// each underlying store is probed until one yields a non-empty iterator.
public static KeyValueIterator<Windowed<GenericKey>, GenericRow> fetch(
    final ReadOnlySessionStore<GenericKey, GenericRow> store,
    final GenericKey key
) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlySessionStore<GenericKey, GenericRow>> stores = getStores(store);
    final Function<ReadOnlySessionStore<GenericKey, GenericRow>,
        KeyValueIterator<Windowed<GenericKey>, GenericRow>> uncachedFetch =
            sessionStore -> fetchUncached(sessionStore, key);
    return findFirstNonEmptyIterator(stores, uncachedFetch);
}
// fetch() must unwrap the metered/caching layers and query the inner session
// store directly with the raw serialized key, bypassing the cache.
@Test
public void shouldAvoidNonSessionStore() throws IllegalAccessException {
    when(provider.stores(any(), any())).thenReturn(ImmutableList.of(meteredSessionStore));
    // Inject serdes via reflection since the field is normally set by Streams.
    SERDES_FIELD.set(meteredSessionStore, serdes);
    when(serdes.rawKey(any())).thenReturn(BYTES);
    when(meteredSessionStore.wrapped()).thenReturn(wrappedSessionStore);
    when(wrappedSessionStore.wrapped()).thenReturn(stateStore);
    when(wrappedSessionStore.fetch(any())).thenReturn(storeIterator);
    when(storeIterator.hasNext()).thenReturn(false);
    SessionStoreCacheBypass.fetch(store, SOME_KEY);
    // The inner store must be queried with the raw serialized key.
    verify(wrappedSessionStore).fetch(new Bytes(BYTES));
}
// Maps a portable (proto) Trigger definition onto its runner-side
// TriggerStateMachine equivalent, recursing into composite triggers.
public static TriggerStateMachine stateMachineForTrigger(RunnerApi.Trigger trigger) {
    switch (trigger.getTriggerCase()) {
      case AFTER_ALL:
        return AfterAllStateMachine.of(
            stateMachinesForTriggers(trigger.getAfterAll().getSubtriggersList()));
      case AFTER_ANY:
        return AfterFirstStateMachine.of(
            stateMachinesForTriggers(trigger.getAfterAny().getSubtriggersList()));
      case AFTER_END_OF_WINDOW:
        return stateMachineForAfterEndOfWindow(trigger.getAfterEndOfWindow());
      case ELEMENT_COUNT:
        return AfterPaneStateMachine.elementCountAtLeast(
            trigger.getElementCount().getElementCount());
      case AFTER_SYNCHRONIZED_PROCESSING_TIME:
        return AfterSynchronizedProcessingTimeStateMachine.ofFirstElement();
      case DEFAULT:
        return DefaultTriggerStateMachine.of();
      case NEVER:
        return NeverStateMachine.ever();
      case ALWAYS:
        // ALWAYS is represented by the reshuffle trigger's state machine.
        return ReshuffleTriggerStateMachine.create();
      case OR_FINALLY:
        return stateMachineForTrigger(trigger.getOrFinally().getMain())
            .orFinally(stateMachineForTrigger(trigger.getOrFinally().getFinally()));
      case REPEAT:
        return RepeatedlyStateMachine.forever(
            stateMachineForTrigger(trigger.getRepeat().getSubtrigger()));
      case AFTER_EACH:
        return AfterEachStateMachine.inOrder(
            stateMachinesForTriggers(trigger.getAfterEach().getSubtriggersList()));
      case AFTER_PROCESSING_TIME:
        return stateMachineForAfterProcessingTime(trigger.getAfterProcessingTime());
      case TRIGGER_NOT_SET:
        throw new IllegalArgumentException(
            String.format("Required field 'trigger' not set on %s", trigger));
      default:
        throw new IllegalArgumentException(String.format("Unknown trigger type %s", trigger));
    }
}
// An AFTER_ANY proto trigger must translate to AfterFirstStateMachine with
// each subtrigger translated in order.
@Test
public void testAfterFirstTranslation() {
    RunnerApi.Trigger trigger =
        RunnerApi.Trigger.newBuilder()
            .setAfterAny(
                RunnerApi.Trigger.AfterAny.newBuilder()
                    .addSubtriggers(subtrigger1)
                    .addSubtriggers(subtrigger2))
            .build();
    AfterFirstStateMachine machine =
        (AfterFirstStateMachine) TriggerStateMachines.stateMachineForTrigger(trigger);
    assertThat(machine, equalTo(AfterFirstStateMachine.of(submachine1, submachine2)));
}
// Parses a single MetadataUpdate from its JSON representation, delegating the
// node-level parsing to the JsonNode overload.
public static MetadataUpdate fromJson(String json) {
    return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
// SET_PROPERTIES parsing must reject null property values with a clear error.
@Test
public void testSetPropertiesFromJsonFailsWhenDeserializingNullValues() {
    String action = MetadataUpdateParser.SET_PROPERTIES;
    // props mirrors the JSON payload below for readability only.
    Map<String, String> props = Maps.newHashMap();
    props.put("prop1", "val1");
    props.put("prop2", null);
    String propsMap = "{\"prop1\":\"val1\",\"prop2\":null}";
    String json = String.format("{\"action\":\"%s\",\"updated\":%s}", action, propsMap);
    assertThatThrownBy(() -> MetadataUpdateParser.fromJson(json))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse to a string value: prop2: null");
}
@Override public ClientDetailsEntity updateClient(ClientDetailsEntity oldClient, ClientDetailsEntity newClient) throws IllegalArgumentException { if (oldClient != null && newClient != null) { for (String uri : newClient.getRegisteredRedirectUri()) { if (blacklistedSiteService.isBlacklisted(uri)) { throw new IllegalArgumentException("Client URI is blacklisted: " + uri); } } // if the client is flagged to allow for refresh tokens, make sure it's got the right scope ensureRefreshTokenConsistency(newClient); // make sure we don't have both a JWKS and a JWKS URI ensureKeyConsistency(newClient); // check consistency when using HEART mode checkHeartMode(newClient); // check the sector URI checkSectorIdentifierUri(newClient); // make sure a client doesn't get any special system scopes ensureNoReservedScopes(newClient); return clientRepository.updateClient(oldClient.getId(), newClient); } throw new IllegalArgumentException("Neither old client or new client can be null!"); }
// A blacklisted redirect URI must abort the update with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void updateClient_blacklistedUri() {
    ClientDetailsEntity oldClient = Mockito.mock(ClientDetailsEntity.class);
    ClientDetailsEntity newClient = Mockito.mock(ClientDetailsEntity.class);
    String badSite = "badsite.xxx";
    Mockito.when(newClient.getRegisteredRedirectUri()).thenReturn(Sets.newHashSet(badSite));
    Mockito.when(blacklistedSiteService.isBlacklisted(badSite)).thenReturn(true);
    service.updateClient(oldClient, newClient);
}
/**
 * Filters candidate instances by the configured tag rules, then delegates
 * to the next handler in the chain.
 */
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
    // Nothing to filter when this handler should not run for the given instances.
    if (!shouldHandle(instances)) {
        return instances;
    }
    // Narrow candidates by the rules and let the chain continue on the result.
    return super.handle(targetName, getTargetInstancesByRules(targetName, instances), requestData);
}
// With an AZ tag-match rule installed but no instance in the app's own zone ("az1"),
// no filtering applies and all candidate instances are returned.
@Test
public void testGetTargetInstancesByTagRulesWithoutPolicySceneTwo() {
    RuleInitializationUtils.initAZTagMatchRule();
    List<Object> instances = new ArrayList<>();
    ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0", "az2");
    ServiceInstance instance2 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0", "az3");
    instances.add(instance1);
    instances.add(instance2);
    Map<String, String> metadata = new HashMap<>();
    metadata.put("zone", "az1");
    AppCache.INSTANCE.setMetadata(metadata);
    List<Object> targetInstances = tagRouteHandler.handle("foo", instances, new RequestData(null, null, null));
    Assert.assertEquals(2, targetInstances.size());
    // Clean up the globally-cached rule so it does not leak into other tests.
    ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
public void error(String errorCode, String errorDescription, String errorUri) { if (Objects.requireNonNull(errorCode).isEmpty()) throw new IllegalArgumentException("error code must not be empty"); this.errorCode = errorCode; this.errorDescription = errorDescription; this.errorUri = errorUri; this.token = null; }
// After error(...) is invoked, all three error fields must be readable and token must be null.
@Test
public void testError() {
    String errorCode = "errorCode";
    String errorDescription = "errorDescription";
    String errorUri = "errorUri";
    OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback();
    callback.error(errorCode, errorDescription, errorUri);
    assertEquals(errorCode, callback.errorCode());
    assertEquals(errorDescription, callback.errorDescription());
    assertEquals(errorUri, callback.errorUri());
    assertNull(callback.token());
}
/**
 * Routes candidate instances either by the request's tag directly or by the
 * configured rules, then delegates to the next handler in the chain.
 */
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
    // Without request data there is nothing to match on; pass straight through the chain.
    if (requestData == null) {
        return super.handle(targetName, instances, null);
    }
    if (!shouldHandle(instances)) {
        return instances;
    }
    // Two routing strategies: match on the request tag, or evaluate path/tag rules.
    final List<Object> result;
    if (routerConfig.isUseRequestRouter()) {
        result = getTargetInstancesByRequest(targetName, instances, requestData.getTag());
    } else {
        result = getTargetInstancesByRules(targetName, instances, requestData.getPath(), requestData.getTag());
    }
    return super.handle(targetName, result, requestData);
}
// Exercises the fallback behavior of request-header routing when the requested
// tag value matches no instance: prefer untagged instances, else all instances,
// else (when every instance carries the mismatched tag) an empty result.
@Test
public void testGetTargetInstancesByRequestWithMismatch() {
    config.setUseRequestRouter(true);
    config.setRequestTags(Arrays.asList("foo", "bar", "version"));
    List<Object> instances = new ArrayList<>();
    ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0", Collections.singletonMap("foo", "bar1"));
    instances.add(instance1);
    ServiceInstance instance2 = TestDefaultServiceInstance
        .getTestDefaultServiceInstance("1.0.1", Collections.singletonMap("bar", "bar2"));
    instances.add(instance2);
    ServiceInstance instance3 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.2");
    instances.add(instance3);
    // Bar does not match: If the bar1 instance does not match, the instance without the bar label is matched
    Map<String, List<String>> header = new HashMap<>();
    header.put("bar", Collections.singletonList("bar1"));
    List<Object> targetInstances = flowRouteHandler.handle("foo", instances, new RequestData(header, null, null));
    Assert.assertEquals(2, targetInstances.size());
    Assert.assertFalse(targetInstances.contains(instance2));
    // If the bar: bar1 instance does not match, the instance without the bar tag is preferentially matched,
    // and if there is no instance without the bar tag, an empty list is returned
    List<Object> sameInstances = new ArrayList<>();
    ServiceInstance sameInstance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0", Collections.singletonMap("bar", "bar3"));
    sameInstances.add(sameInstance1);
    ServiceInstance sameInstance2 = TestDefaultServiceInstance
        .getTestDefaultServiceInstance("1.0.1", Collections.singletonMap("bar", "bar2"));
    sameInstances.add(sameInstance2);
    header.clear();
    header.put("bar", Collections.singletonList("bar1"));
    targetInstances = flowRouteHandler.handle("foo", sameInstances, new RequestData(header, null, null));
    Assert.assertEquals(0, targetInstances.size());
    // If the version: 1.0.3 instance does not match, all versions of the instance are returned
    header.clear();
    header.put("version", Collections.singletonList("1.0.3"));
    targetInstances = flowRouteHandler.handle("foo", instances, new RequestData(header, null, null));
    Assert.assertEquals(3, targetInstances.size());
    // If no header is passed, the instance without a tag is matched
    header.clear();
    targetInstances = flowRouteHandler.handle("foo", instances, new RequestData(header, null, null));
    Assert.assertEquals(1, targetInstances.size());
    Assert.assertEquals(instance3, targetInstances.get(0));
    // If no header is passed, the instance without a label is matched first,
    // and if there are no instances without a label, all instances are returned
    header.clear();
    targetInstances = flowRouteHandler.handle("foo", sameInstances, new RequestData(header, null, null));
    Assert.assertEquals(sameInstances, targetInstances);
}
// Convenience overload: delegates to the two-argument analyze with the boolean
// flag set to false (flag semantics not visible here — see the overload).
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// Smoke test: a GROUP BY mixing ordinals and column names alongside a wildcard
// select must analyze without error. Currently only checks it does not throw.
@Test
public void testGroupByOrdinalsWithWildcard() {
    // TODO: verify output
    analyze("SELECT t1.*, a FROM t1 GROUP BY 1,2,c,d");
}
// Translates a Hive SearchArgument predicate tree (expression + leaves) into a
// filter Expression via the translate helper.
public static Expression generateFilterExpression(SearchArgument sarg) {
    return translate(sarg.getExpression(), sarg.getLeaves());
}
/** An IN predicate on a LONG column must translate to an equivalent Iceberg in() predicate. */
@Test
public void testInOperand() {
    SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
    SearchArgument arg = builder.startAnd().in("salary", PredicateLeaf.Type.LONG, 3000L, 4000L).end().build();
    UnboundPredicate expected = Expressions.in("salary", 3000L, 4000L);
    UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg);
    // Fix: assertEquals takes (expected, actual); the original had the arguments
    // reversed, which yields misleading failure messages.
    assertEquals(expected.op(), actual.op());
    assertEquals(expected.literals(), actual.literals());
    assertEquals(expected.ref().name(), actual.ref().name());
}
// Resolves the key or value side of a mapping to a concrete field list.
// The target class comes either from metadata derived from the user-declared
// fields or, failing that, from the connector options. Primitive/custom types
// map to a single field; plain OBJECT types are resolved property-by-property.
@Override
public Stream<MappingField> resolveAndValidateFields(
    boolean isKey,
    List<MappingField> userFields,
    Map<String, String> options,
    InternalSerializationService serializationService
) {
    Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
    // Prefer a class recorded in field metadata; fall back to the class named in options.
    Class<?> typeClass = getMetadata(fieldsByPath)
        .<Class<?>>map(KvMetadataJavaResolver::loadClass)
        .orElseGet(() -> loadClass(options, isKey));
    QueryDataType type = QueryDataTypeUtils.resolveTypeForClass(typeClass);
    if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
        // Single-valued mapping: validate user-declared fields against it if any were given.
        return userFields.isEmpty()
            ? resolvePrimitiveField(isKey, type)
            : resolveAndValidatePrimitiveField(isKey, fieldsByPath, type);
    } else {
        // Plain object: expand (or validate) its properties as individual fields.
        return userFields.isEmpty()
            ? resolveObjectFields(isKey, typeClass)
            : resolveAndValidateObjectFields(isKey, fieldsByPath, typeClass);
    }
}
// Parameterized over key ("__key") and value ("this") sides: a java-format
// mapping declared as int must resolve to a single INT field at the side's root path.
@Test
@Parameters({
    "true, __key",
    "false, this"
})
public void test_resolvePrimitiveField(boolean key, String path) {
    Map<String, String> options = Map.of(
        (key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
        (key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), int.class.getName()
    );
    Stream<MappingField> fields = INSTANCE.resolveAndValidateFields(key, emptyList(), options, null);
    assertThat(fields).containsExactly(field(path, QueryDataType.INT, QueryPath.create(path).toString()));
}
/**
 * Returns the value of the given column, converted to a byte.
 *
 * @param columnIndex 1-based column index
 * @throws SQLException if the underlying merged result set fails
 */
@Override
public byte getByte(final int columnIndex) throws SQLException {
    // Fetch the raw merged value, then convert via the shared conversion utility.
    final Object rawValue = mergeResultSet.getValue(columnIndex, byte.class);
    return (byte) ResultSetUtils.convertValue(rawValue, byte.class);
}
// getByte(label) must resolve the label to column index 1 and return the merged value.
@Test
void assertGetByteWithColumnLabel() throws SQLException {
    when(mergeResultSet.getValue(1, byte.class)).thenReturn((byte) 1);
    assertThat(shardingSphereResultSet.getByte("label"), is((byte) 1));
}
/**
 * Parses and validates a property value.
 *
 * <p>The legacy RUN SCRIPT content property bypasses normal resolution and is
 * validated as-is; every other property is resolved strictly, parsed, and then
 * validated under its canonical name.
 *
 * @throws PropertyNotFoundException if the property cannot be resolved
 */
@Override
public Object parse(final String property, final Object value) {
    if (property.equalsIgnoreCase(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT)) {
        validator.validate(property, value);
        return value;
    }

    // Strict resolution: an unknown property is an error, not a pass-through.
    final ConfigItem configItem = resolver.resolve(property, true)
        .orElseThrow(() -> new PropertyNotFoundException(property));

    final Object parsed = configItem.parseValue(value);
    validator.validate(configItem.getPropertyName(), parsed);
    return parsed;
}
// When the resolver yields nothing for a property, parse() must fail with a
// message identifying the unrecognized property.
@Test
public void shouldThrowIfResolverFailsToResolve() {
    // Given:
    when(resolver.resolve(anyString(), anyBoolean()))
        .thenReturn(Optional.empty());

    // When:
    final IllegalArgumentException e = assertThrows(
        IllegalArgumentException.class,
        () -> parser.parse("Unknown", "100")
    );

    // Then:
    assertThat(e.getMessage(), containsString(
        "Not recognizable as ksql, streams, consumer, or producer property: 'Unknown'"
    ));
}
// Hash derives solely from the wrapped map (presumably mirroring equals — confirm).
// Note: Objects.hash(targetMap) == 31 + targetMap.hashCode(); do NOT "simplify"
// to Objects.hashCode(targetMap), which would change the value.
@Override
public int hashCode() {
    return Objects.hash(targetMap);
}
// Equal maps hash equally; mutating the copy must change its hash relative to the original.
@Test
void testHashCode() {
    Map<String, Object> map = new LowerCaseLinkHashMap<>(lowerCaseLinkHashMap);
    Assertions.assertEquals(lowerCaseLinkHashMap.hashCode(), map.hashCode());
    map.put("equals", "equals2");
    Assertions.assertNotEquals(lowerCaseLinkHashMap.hashCode(), map.hashCode());
}
// Dispatches to the concrete CRArtifact implementation based on the TYPE
// discriminator field in the JSON, using the shared disambiguation helper.
@Override
public CRArtifact deserialize(JsonElement json, Type type, JsonDeserializationContext context) throws JsonParseException {
    return determineJsonElementForDistinguishingImplementers(json, context, TYPE, TypeAdapter.ARTIFACT_ORIGIN);
}
// An artifact JSON with type "test" must be deserialized as a CRBuiltInArtifact.
// NOTE(review): the method name mentions "a task of type ant" but this exercises
// the artifact adapter with type "test" — consider renaming for clarity.
@Test
public void shouldInstantiateATaskOfTypeAnt() {
    JsonObject jsonObject = new JsonObject();
    jsonObject.addProperty("type", "test");
    artifactTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext);
    verify(jsonDeserializationContext).deserialize(jsonObject, CRBuiltInArtifact.class);
}
// Human-readable analyzer name.
@Override
public String getName() {
    return "Poetry Analyzer";
}
// Pins the analyzer's display name.
@Test
public void testName() {
    assertEquals("Analyzer name wrong.", "Poetry Analyzer", analyzer.getName());
}
/**
 * Reads, deserializes, and validates an external issue report.
 *
 * @param reportPath path to the UTF-8 encoded JSON report
 * @throws IllegalStateException if the file cannot be read or contains invalid JSON
 */
public ExternalIssueReport parse(Path reportPath) {
    try (Reader reader = Files.newBufferedReader(reportPath, StandardCharsets.UTF_8)) {
        ExternalIssueReport parsedReport = gson.fromJson(reader, ExternalIssueReport.class);
        externalIssueReportValidator.validate(parsedReport, reportPath);
        return parsedReport;
    } catch (JsonSyntaxException e) {
        // Malformed JSON gets a dedicated, more specific message.
        throw new IllegalStateException("Failed to read external issues report '" + reportPath + "': invalid JSON syntax", e);
    } catch (JsonIOException | IOException e) {
        throw new IllegalStateException("Failed to read external issues report '" + reportPath + "'", e);
    }
}
// A syntactically invalid deprecated-format report must fail with the
// "invalid JSON syntax" variant of the error message.
@Test
public void parse_whenInvalidDeprecatedFormat_shouldFail() {
    reportPath = Paths.get(DEPRECATED_REPORTS_LOCATION + "report_invalid_json.json");
    assertThatThrownBy(() -> externalIssueReportParser.parse(reportPath))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Failed to read external issues report 'src/test/resources/org/sonar/scanner/externalissue/report_invalid_json.json': "
            + "invalid JSON syntax");
}
/**
 * Decides whether user-directory migration is required: a mapping file means it
 * already happened; otherwise migration is needed only if user directories exist.
 */
boolean needsMigration() {
    final File mappingFile = UserIdMapper.getConfigFile(usersDirectory);
    if (mappingFile.exists() && mappingFile.isFile()) {
        LOGGER.finest("User mapping file already exists. No migration needed.");
        return false;
    }
    final File[] userDirectories = listUserDirectories();
    if (userDirectories == null) {
        // Directory listing failed or directory missing — nothing to migrate.
        return false;
    }
    return userDirectories.length > 0;
}
// After the mapper initializes over a directory with three users, migration is no
// longer needed and all three (case-insensitive) user IDs are mapped.
@Test
public void migrateMultipleUsers() throws IOException {
    File usersDirectory = createTestDirectory(getClass(), name);
    IdStrategy idStrategy = IdStrategy.CASE_INSENSITIVE;
    UserIdMigrator migrator = new UserIdMigrator(usersDirectory, idStrategy);
    TestUserIdMapper mapper = new TestUserIdMapper(usersDirectory, idStrategy);
    mapper.init();
    // init() performs the migration, so none should be pending afterwards.
    assertThat(migrator.needsMigration(), is(false));
    // Re-initializing from disk must see the already-converted IDs.
    mapper = new TestUserIdMapper(usersDirectory, idStrategy);
    mapper.init();
    assertThat(mapper.getConvertedUserIds().size(), is(3));
    assertThat(mapper.isMapped("fred"), is(true));
    assertThat(mapper.isMapped("foo/bar"), is(true));
    assertThat(mapper.isMapped("zzz\u1000"), is(true));
}
@Override public StorageObject upload(final Path file, Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback prompt) throws BackgroundException { if(this.threshold(status)) { try { return new S3MultipartUploadService(session, writer, acl).upload(file, local, throttle, listener, status, prompt); } catch(NotfoundException | InteroperabilityException e) { log.warn(String.format("Failure %s using multipart upload. Fallback to single upload.", e)); status.append(false); try { return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt); } catch(BackgroundException f) { log.warn(String.format("Failure %s using single upload. Throw original multipart failure %s", e, e)); throw e; } } } // Use single upload service return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt); }
// Uploading into a non-existent bucket must surface a NotfoundException
// (the multipart attempt fails and the single-upload fallback cannot succeed either).
@Test(expected = NotfoundException.class)
public void testUploadInvalidContainer() throws Exception {
    final S3ThresholdUploadService m = new S3ThresholdUploadService(session, new S3AccessControlListFeature(session), 5 * 1024L);
    final Path container = new Path("nosuchcontainer.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final Local local = new NullLocal(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    // Length equals the threshold, so the multipart path is exercised first.
    final TransferStatus status = new TransferStatus().withLength(5 * 1024L);
    m.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), status, null);
}
/**
 * Returns true if this address falls in the multicast range of its IP family.
 */
public boolean isMulticast() {
    // IPv4 and IPv6 define distinct reserved multicast prefixes.
    if (isIp4()) {
        return Ip4Prefix.IPV4_MULTICAST_PREFIX.contains(this.getIp4Address());
    }
    return Ip6Prefix.IPV6_MULTICAST_PREFIX.contains(this.getIp6Address());
}
// Covers both families: unicast addresses report false, multicast addresses true.
@Test
public void testIsMulticast() {
    IpAddress v4Unicast = IpAddress.valueOf("10.0.0.1");
    IpAddress v4Multicast = IpAddress.valueOf("224.0.0.1");
    IpAddress v6Unicast = IpAddress.valueOf("1000::1");
    IpAddress v6Multicast = IpAddress.valueOf("ff02::1");
    assertFalse(v4Unicast.isMulticast());
    assertTrue(v4Multicast.isMulticast());
    assertFalse(v6Unicast.isMulticast());
    assertTrue(v6Multicast.isMulticast());
}
/**
 * Asserts the optional under test is present. A null subject and an absent
 * optional produce distinct failure messages.
 */
public void isPresent() {
    if (actual == null) {
        failWithActual(simpleFact("expected present optional"));
        return;
    }
    if (!actual.isPresent()) {
        failWithoutActual(simpleFact("expected to be present"));
    }
}
// An absent (non-null) optional must fail with exactly the "expected to be present" key.
@Test
public void isPresentFailing() {
    expectFailureWhenTestingThat(Optional.absent()).isPresent();
    assertFailureKeys("expected to be present");
}
// Convenience overload: uses this selection's own inputMask as the selected-inputs mask.
public int fairSelectNextIndex(long availableInputsMask, int lastReadInputIndex) {
    return fairSelectNextIndex(inputMask, availableInputsMask, lastReadInputIndex);
}
// With ALL inputs selected, selection must round-robin over available inputs,
// return NONE_AVAILABLE when the mask is empty, and wrap at the 64-bit boundary.
@Test
void testFairSelectNextIndexWithAllInputsSelected() {
    assertThat(InputSelection.ALL.fairSelectNextIndex(7, 0)).isOne();
    assertThat(InputSelection.ALL.fairSelectNextIndex(7, 1)).isEqualTo(2);
    assertThat(InputSelection.ALL.fairSelectNextIndex(7, 2)).isZero();
    assertThat(InputSelection.ALL.fairSelectNextIndex(7, 0)).isOne();
    assertThat(InputSelection.ALL.fairSelectNextIndex(0, 2))
        .isEqualTo(InputSelection.NONE_AVAILABLE);
    assertThat(InputSelection.ALL.fairSelectNextIndex(-1, 10)).isEqualTo(11);
    // lastReadInputIndex at or beyond bit 63 wraps around to input 0.
    assertThat(InputSelection.ALL.fairSelectNextIndex(-1, 63)).isZero();
    assertThat(InputSelection.ALL.fairSelectNextIndex(-1, 158)).isZero();
}
// Convenience overload: CSV-escape without trimming whitespace (second arg false).
public static CharSequence escapeCsv(CharSequence value) {
    return escapeCsv(value, false);
}
// A value containing an already-doubled quote must still be wrapped and have
// each quote doubled again.
// NOTE(review): escapeCsv(value, expected) is presumably a local assertion
// helper comparing actual vs expected — confirm against the enclosing class.
@Test
public void escapeCsvAlreadyEscapedQuote() {
    CharSequence value = "\"some\"\"";
    CharSequence expected = "\"some\"\"\"";
    escapeCsv(value, expected);
}
// Parses the command-line arguments, creates an Admin client from them, and runs
// the command; the client is closed automatically via try-with-resources.
private static void execute(String... args) throws Exception {
    LogDirsCommandOptions options = new LogDirsCommandOptions(args);
    try (Admin adminClient = createAdminClient(options)) {
        execute(options, adminClient);
    }
}
// Requesting log dirs for broker IDs that do not exist must fail, and the error
// message must list exactly the missing IDs and the currently-known brokers.
@Test
public void shouldThrowWhenQueryingNonExistentBrokers() {
    Node broker = new Node(1, "hostname", 9092);
    try (MockAdminClient adminClient = new MockAdminClient(Collections.singletonList(broker), broker)) {
        RuntimeException exception = assertThrows(RuntimeException.class,
            () -> execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--broker-list", "0,1,2", "--describe"), adminClient));
        assertNotNull(exception.getCause());
        assertEquals(TerseException.class, exception.getCause().getClass());
        assertEquals("ERROR: The given brokers do not exist from --broker-list: 0,2. Current existent brokers: 1",
            exception.getCause().getMessage());
    }
}
// Convenience overload: shuts down client channels using the default SHUTDOWN type.
public Promise<Void> gracefullyShutdownClientChannels() {
    return gracefullyShutdownClientChannels(ShutdownType.SHUTDOWN);
}
// With a zero force-close timeout and one channel whose close() throws, the
// shutdown promise must still complete and all other channels must be closed.
@Test
void connectionNeedsToBeForceClosedAndOneChannelThrowsAnException() throws Exception {
    String configName = "server.outofservice.close.timeout";
    AbstractConfiguration configuration = ConfigurationManager.getConfigInstance();
    try {
        // Zero timeout forces immediate close rather than waiting for quiescence.
        configuration.setProperty(configName, "0");
        createChannels(5);
        // This channel's outbound close handler always throws.
        ChannelFuture connect = new Bootstrap()
            .group(CLIENT_EVENT_LOOP)
            .channel(LocalChannel.class)
            .handler(new ChannelInitializer<>() {
                @Override
                protected void initChannel(Channel ch) {
                    ch.pipeline().addLast(new ChannelOutboundHandlerAdapter() {
                        @Override
                        public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
                            throw new Exception();
                        }
                    });
                }
            })
            .remoteAddress(LOCAL_ADDRESS)
            .connect()
            .sync();
        channels.add(connect.channel());
        boolean await = shutdown.gracefullyShutdownClientChannels().await(10, TimeUnit.SECONDS);
        assertTrue(await, "the promise should finish even if a channel failed to close");
        assertEquals(1, channels.size(), "all other channels should have been closed");
    } finally {
        // Restore the shared configuration so other tests see the default timeout.
        configuration.setProperty(configName, "30");
    }
}
// Returns the image payload as raw bytes (precomputed; presumably decoded from
// the base64 data at construction — confirm against the constructor).
public byte[] getDataAsBytes() {
    return dataAsBytes;
}
// An Image built from base64-encoded content must expose the decoded raw bytes.
@Test
void convertsToByteData() {
    String encodedString = Base64.getEncoder().encodeToString("asdf".getBytes(StandardCharsets.UTF_8));
    Image image = new Image("image/png", encodedString, "sha-256");
    assertThat(image.getDataAsBytes(), is("asdf".getBytes(StandardCharsets.UTF_8)));
}
/**
 * Drives group-coordination progress: heartbeating, coordinator discovery, and
 * (re)joining for auto-assigned subscriptions, or metadata refresh for manual
 * assignment. Returns false when coordination could not complete in time and
 * the caller should retry; true when it is safe to proceed with fetching.
 */
public boolean poll(Timer timer, boolean waitForJoinGroup) {
    maybeUpdateSubscriptionMetadata();

    invokeCompletedOffsetCommitCallbacks();

    if (subscriptions.hasAutoAssignedPartitions()) {
        if (protocol == null) {
            throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
                " to empty while trying to subscribe for group protocol to auto assign partitions");
        }
        // Always update the heartbeat last poll time so that the heartbeat thread does not leave the
        // group proactively due to application inactivity even if (say) the coordinator cannot be found.
        pollHeartbeat(timer.currentTimeMs());
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }

        if (rejoinNeededOrPending()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription()) {
                // For consumer group that uses pattern-based subscription, after a topic is created,
                // any consumer that discovers the topic after metadata refresh can trigger rebalance
                // across the entire consumer group. Multiple rebalances can be triggered after one topic
                // creation if consumers refresh metadata at vastly different times. We can significantly
                // reduce the number of rebalances caused by single topic creation by asking consumer to
                // refresh metadata before re-joining the group as long as the refresh backoff time has
                // passed.
                if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
                    this.metadata.requestUpdate(true);
                }

                if (!client.ensureFreshMetadata(timer)) {
                    return false;
                }

                maybeUpdateSubscriptionMetadata();
            }

            // if not wait for join group, we would just use a timer of 0
            if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
                // since we may use a different timer in the callee, we'd still need
                // to update the original timer's current time after the call
                timer.update(time.milliseconds());

                return false;
            }
        }
    } else {
        // For manually assigned partitions, we do not try to pro-actively lookup coordinator;
        // instead we only try to refresh metadata when necessary.
        // If connections to all nodes fail, wakeups triggered while attempting to send fetch
        // requests result in polls returning immediately, causing a tight loop of polls. Without
        // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
        // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
        if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
            client.awaitMetadataUpdate(timer);
        }

        // if there is pending coordinator requests, ensure they have a chance to be transmitted.
        client.pollNoWakeup();
    }

    maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
    return true;
}
// With user-assigned partitions, poll() must not proactively re-discover the
// coordinator after it becomes unknown.
@Test
public void testCoordinatorNotAvailableWithUserAssignedType() {
    subscriptions.assignFromUser(Collections.singleton(t1p));
    // should mark coordinator unknown after COORDINATOR_NOT_AVAILABLE error
    client.prepareResponse(groupCoordinatorResponse(node, Errors.COORDINATOR_NOT_AVAILABLE));
    // set timeout to 0 because we don't want to retry after the error
    coordinator.poll(time.timer(0));
    assertTrue(coordinator.coordinatorUnknown());

    // should not try to find coordinator since we are in manual assignment
    // hence the prepared response should not be returned
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    assertTrue(coordinator.coordinatorUnknown());
}
// Returns the process-wide cached HostInfo instance (lazily created by Singleton).
public static HostInfo getHostInfo() {
    return Singleton.get(HostInfo.class);
}
// Smoke test: host info lookup must yield a non-null object.
@Test
public void getHostInfo() {
    final HostInfo hostInfo = SystemUtil.getHostInfo();
    assertNotNull(hostInfo);
}
/**
 * Validates a MODIFY BACKEND clause. Two variants exist: renaming a backend
 * host (src/dest host pair) and modifying a backend's properties by host:port.
 */
@Override
public Void visitModifyBackendClause(ModifyBackendClause clause, ConnectContext context) {
    if (clause.getBackendHostPort() != null) {
        // host:port variant — validate the endpoint and its property map.
        SystemInfoService.validateHostAndPort(clause.getBackendHostPort(), false);
        analyzeBackendProperties(clause.getProperties());
    } else {
        // host-rename variant.
        checkModifyHostClause(clause.getSrcHost(), clause.getDestHost());
    }
    return null;
}
/** A host-rename MODIFY BACKEND clause must pass analysis without throwing. */
@Test
public void testVisitModifyBackendClause() {
    mockNet();
    AlterSystemStmtAnalyzer visitor = new AlterSystemStmtAnalyzer();
    ModifyBackendClause clause = new ModifyBackendClause("test", "fqdn");
    // The visitor returns Void; completing without an exception is the assertion.
    // (Removed the unused `result` local that previously captured the null return.)
    visitor.visitModifyBackendClause(clause, null);
}
// Sets this layout's parent; the root layout can never be re-parented.
// Returns this layout for chaining.
public UiTopoLayout parent(UiTopoLayoutId parentId) {
    if (isRoot()) {
        throw new IllegalArgumentException(E_ROOT_PARENT);
    }
    // TODO: consider checking ancestry chain to prevent loops
    parent = parentId;
    return this;
}
// Re-parenting the root layout must be rejected regardless of the argument
// (a real ID or null), always with the root-parent error message.
@Test
public void setParentOnRoot() {
    mkRootLayout();
    try {
        layout.parent(OTHER_ID);
        fail(AM_NOEX);
    } catch (IllegalArgumentException e) {
        assertEquals(AM_WREXMSG, E_ROOT_PARENT, e.getMessage());
    }
    try {
        layout.parent(null);
        fail(AM_NOEX);
    } catch (IllegalArgumentException e) {
        assertEquals(AM_WREXMSG, E_ROOT_PARENT, e.getMessage());
    }
}
// Builds a DoFnInvoker for the given DoFn via the ByteBuddy-based factory singleton.
public static <InputT, OutputT> DoFnInvoker<InputT, OutputT> invokerFor(
    DoFn<InputT, OutputT> fn) {
    return ByteBuddyDoFnInvokerFactory.only().newByteBuddyInvoker(fn);
}
// For an unbounded splittable DoFn that relies entirely on default methods, the
// generated invoker must supply the default restriction coder, watermark
// estimator state coder, tracker, watermark estimator, and a null truncation
// result (unbounded restrictions are not truncated by default).
@Test
public void testTruncateFnWithHasDefaultMethodsWhenUnbounded() throws Exception {
    class UnboundedMockFn extends DoFn<String, String> {
        @ProcessElement
        public void processElement(
            ProcessContext c,
            RestrictionTracker<RestrictionWithUnboundedDefaultTracker, Void> tracker,
            WatermarkEstimator<WatermarkEstimatorStateWithDefaultWatermarkEstimator> watermarkEstimator) {}

        @GetInitialRestriction
        public RestrictionWithUnboundedDefaultTracker getInitialRestriction(@Element String element) {
            return null;
        }

        @GetInitialWatermarkEstimatorState
        public WatermarkEstimatorStateWithDefaultWatermarkEstimator getInitialWatermarkEstimatorState() {
            return null;
        }
    }
    UnboundedMockFn fn = mock(UnboundedMockFn.class);
    DoFnInvoker<String, String> invoker = DoFnInvokers.invokerFor(fn);

    CoderRegistry coderRegistry = CoderRegistry.createDefault();
    coderRegistry.registerCoderProvider(
        CoderProviders.fromStaticMethods(
            RestrictionWithUnboundedDefaultTracker.class, CoderForDefaultTracker.class));
    coderRegistry.registerCoderForClass(
        WatermarkEstimatorStateWithDefaultWatermarkEstimator.class,
        new CoderForWatermarkEstimatorStateWithDefaultWatermarkEstimator());
    assertThat(
        invoker.<RestrictionWithBoundedDefaultTracker>invokeGetRestrictionCoder(coderRegistry),
        instanceOf(CoderForDefaultTracker.class));
    assertThat(
        invoker.invokeGetWatermarkEstimatorStateCoder(coderRegistry),
        instanceOf(CoderForWatermarkEstimatorStateWithDefaultWatermarkEstimator.class));
    RestrictionTracker tracker =
        invoker.invokeNewTracker(
            new FakeArgumentProvider<String, String>() {
                @Override
                public Object restriction() {
                    return new RestrictionWithUnboundedDefaultTracker();
                }
            });
    assertThat(tracker, instanceOf(UnboundedDefaultTracker.class));
    TruncateResult<?> result =
        invoker.invokeTruncateRestriction(
            new FakeArgumentProvider<String, String>() {
                @Override
                public RestrictionTracker restrictionTracker() {
                    return tracker;
                }

                @Override
                public String element(DoFn<String, String> doFn) {
                    return "blah";
                }

                @Override
                public Object restriction() {
                    return "foo";
                }
            });
    // Unbounded restrictions are not truncated — null means "keep processing".
    assertNull(result);
    assertEquals(stop(), invoker.invokeProcessElement(mockArgumentProvider));
    assertThat(
        invoker.invokeNewWatermarkEstimator(
            new FakeArgumentProvider<String, String>() {
                @Override
                public Object watermarkEstimatorState() {
                    return new WatermarkEstimatorStateWithDefaultWatermarkEstimator();
                }
            }),
        instanceOf(DefaultWatermarkEstimator.class));
}
// Factory for a trigger that fires once the watermark passes the end of the window.
public static FromEndOfWindow pastEndOfWindow() {
    return new FromEndOfWindow();
}
// Merging session windows must rewind an AfterEach trigger back to its
// (already finished) watermark sub-trigger in the merged window.
@Test
public void testOnMergeRewinds() throws Exception {
    tester =
        TriggerStateMachineTester.forTrigger(
            AfterEachStateMachine.inOrder(
                AfterWatermarkStateMachine.pastEndOfWindow(),
                RepeatedlyStateMachine.forever(AfterPaneStateMachine.elementCountAtLeast(1))),
            Sessions.withGapDuration(Duration.millis(10)));

    tester.injectElements(1);
    tester.injectElements(5);
    IntervalWindow firstWindow = new IntervalWindow(new Instant(1), new Instant(11));
    IntervalWindow secondWindow = new IntervalWindow(new Instant(5), new Instant(15));
    IntervalWindow mergedWindow = new IntervalWindow(new Instant(1), new Instant(15));

    // Finish the AfterWatermark.pastEndOfWindow() trigger in only the first window
    tester.advanceInputWatermark(new Instant(11));
    assertTrue(tester.shouldFire(firstWindow));
    assertFalse(tester.shouldFire(secondWindow));
    tester.fireIfShouldFire(firstWindow);

    // Confirm that we are on the second trigger by probing
    assertFalse(tester.shouldFire(firstWindow));
    tester.injectElements(1);
    assertTrue(tester.shouldFire(firstWindow));
    tester.fireIfShouldFire(firstWindow);

    // Merging should re-activate the watermark trigger in the merged window
    tester.mergeWindows();

    // Confirm that we are not on the second trigger by probing
    assertFalse(tester.shouldFire(mergedWindow));
    tester.injectElements(1);
    assertFalse(tester.shouldFire(mergedWindow));

    // And confirm that advancing the watermark fires again
    tester.advanceInputWatermark(new Instant(15));
    assertTrue(tester.shouldFire(mergedWindow));
}
/**
 * Schedules (at most) one execution of the task. Concurrent submissions while a
 * run is pending all share the same future; synchronized guards the lazy
 * initialization of {@code pending}.
 */
public synchronized Future<V> submit() {
    if (pending == null) {
        pending = new CompletableFuture<>();
        maybeRun();
    }
    return pending;
}
// Two submissions while the first execution is blocked must not run the task
// twice concurrently: the second run starts only after the first completes.
@SuppressWarnings("empty-statement")
@Test
public void doubleBooking() throws Exception {
    AtomicInteger counter = new AtomicInteger();
    OneShotEvent lock = new OneShotEvent();
    Future<?> f1, f2;
    ExecutorService base = Executors.newCachedThreadPool();
    AtmostOneTaskExecutor<?> es = new AtmostOneTaskExecutor<Void>(base, () -> {
        counter.incrementAndGet();
        try {
            lock.block();
        } catch (InterruptedException x) {
            throw new RuntimeException(x);
        }
        return null;
    });
    f1 = es.submit();
    while (counter.get() == 0) {
        // spin lock until executor gets to the choking point
    }
    f2 = es.submit(); // this should hang
    Thread.sleep(500); // make sure the 2nd task is hanging
    assertEquals(1, counter.get());
    assertFalse(f2.isDone());
    lock.signal(); // let the first one go
    f1.get(); // first one should complete
    // now 2nd one gets going and hits the choke point
    f2.get();
    assertEquals(2, counter.get());
}
/**
 * Human-readable description of this partition: topic, index, leader id (or
 * "none"), and the formatted replica/ISR/offline-replica node id lists.
 */
@Override
public String toString() {
    // "none" stands in for a missing leader.
    final String leaderId = leader == null ? "none" : leader.idString();
    return String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
        topic,
        partition,
        leaderId,
        formatNodeIds(replicas),
        formatNodeIds(inSyncReplicas),
        formatNodeIds(offlineReplicas));
}
// Pins the exact rendering of PartitionInfo.toString(), including the bracketed
// node-id list formatting.
@Test
public void testToString() {
    String topic = "sample";
    int partition = 0;
    Node leader = new Node(0, "localhost", 9092);
    Node r1 = new Node(1, "localhost", 9093);
    Node r2 = new Node(2, "localhost", 9094);
    Node[] replicas = new Node[] {leader, r1, r2};
    Node[] inSyncReplicas = new Node[] {leader, r1};
    Node[] offlineReplicas = new Node[] {r2};
    PartitionInfo partitionInfo = new PartitionInfo(topic, partition, leader, replicas, inSyncReplicas, offlineReplicas);
    String expected = String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
        topic, partition, leader.idString(), "[0,1,2]", "[0,1]", "[2]");
    assertEquals(expected, partitionInfo.toString());
}
// Sets an extended resource (e.g. GPUs) on a YARN Resource object via reflection.
// Silently skips (with a log) when the running YARN version lacks the resource
// types API or the reflective call fails, so unsupported clusters keep working.
@VisibleForTesting
void setResourceInformationUnSafe(Object resource, String resourceName, long amount) {
    if (!isYarnResourceTypesAvailable) {
        LOG.info(
            "Will not request extended resource {} because the used YARN version does not support it.",
            resourceName);
        return;
    }
    try {
        // Equivalent to: resource.setResourceInformation(name, ResourceInformation.newInstance(name, amount))
        resourceSetResourceInformationMethod.invoke(
            resource,
            resourceName,
            resourceInformationNewInstanceMethod.invoke(null, resourceName, amount));
    } catch (Exception e) {
        LOG.warn(
            "Error in setting the external resource {}. Will not request this resource from YARN.",
            resourceName,
            e);
    }
}
// When the reflective target exposes the expected setter, the resource name and
// value must be recorded on the resource object.
@Test
void testSetResourceInformationIfMethodPresent() {
    final ResourceInformationReflector resourceInformationReflector =
        new ResourceInformationReflector(
            ResourceWithMethod.class.getName(), ResourceInfoWithMethod.class.getName());
    final ResourceWithMethod resourceWithMethod = new ResourceWithMethod();
    resourceInformationReflector.setResourceInformationUnSafe(
        resourceWithMethod, RESOURCE_NAME, RESOURCE_VALUE);

    assertThat(resourceWithMethod.getResourceWithName(RESOURCE_NAME)).isNotNull();
    assertThat(resourceWithMethod.getResourceWithName(RESOURCE_NAME).getName())
        .isEqualTo(RESOURCE_NAME);
    assertThat(resourceWithMethod.getResourceWithName(RESOURCE_NAME).getValue())
        .isEqualTo(RESOURCE_VALUE);
}
/**
 * Notifies the plugin data subscriber once for each selector being removed.
 */
@Override
protected void doDelete(final List<SelectorData> dataList) {
    for (final SelectorData selectorData : dataList) {
        pluginDataSubscriber.unSelectorSubscribe(selectorData);
    }
}
// Deleting a batch of selectors must unsubscribe each one exactly once.
@Test
public void testDoDelete() {
    List<SelectorData> selectorDataList = createFakeSelectorDataObjects(3);
    selectorDataHandler.doDelete(selectorDataList);
    selectorDataList.forEach(verify(subscriber)::unSelectorSubscribe);
}
// Exposes the stored response headers (the internal collection, not a copy).
public Collection<HttpRequest.KeyValuePair> getHeaders() {
    return headers;
}
// Coverage-only test (as the name admits): just touches the getter on a fresh result.
@Test
void testNothingButGetCoverage() {
    new HttpResult().getHeaders();
}
/**
 * Returns the number of instances registered under the given namespace.
 * Fetches a single-element page purely to read the total element count.
 */
@GetMapping("/by-namespace/count")
public long getInstancesCountByNamespace(@RequestParam("appId") String appId,
                                         @RequestParam("clusterName") String clusterName,
                                         @RequestParam("namespaceName") String namespaceName) {
    Page<Instance> firstPage = instanceService.findInstancesByNamespace(appId, clusterName,
        namespaceName, PageRequest.of(0, 1));
    return firstPage.getTotalElements();
}
// The controller must return the page's total element count, not the page size.
@Test
public void testGetInstancesCountByNamespace() throws Exception {
    String someAppId = "someAppId";
    String someClusterName = "someClusterName";
    String someNamespaceName = "someNamespaceName";
    // Page holds 0 elements but reports a total of 2.
    Page<Instance> instances = new PageImpl<>(Collections.emptyList(), pageable, 2);
    when(instanceService.findInstancesByNamespace(eq(someAppId), eq(someClusterName),
        eq(someNamespaceName), any(Pageable.class))).thenReturn(instances);
    long result = instanceConfigController.getInstancesCountByNamespace(someAppId,
        someClusterName, someNamespaceName);
    assertEquals(2, result);
}
// Convenience overload: aggregates with default (unnamed, serde-less) materialization.
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer,
    final Merger<? super K, V> sessionMerger) {
    return aggregate(initializer, sessionMerger, Materialized.with(null, null));
}
// A null initializer must be rejected eagerly with a NullPointerException.
@Test
public void shouldNotHaveNullInitializer3OnAggregate() {
    assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(null,
            sessionMerger, Named.as("name"), Materialized.as("test")));
}
/**
 * Decides whether all dependency processors have caught up to the given
 * timestamp.
 *
 * @param maxTimestamp          the timestamp every dependency must have processed
 * @param processorDependencies event definition IDs this processor depends on
 * @return {@code true} only if state exists for every dependency at or past
 *         {@code maxTimestamp}
 */
public boolean canProcessTimerange(DateTime maxTimestamp, Set<String> processorDependencies) {
    // Collect the IDs of those dependencies whose persisted state shows they
    // have already processed up to maxTimestamp.
    final Set<String> processedIds = stateService
            .findByEventDefinitionsAndMaxTimestamp(processorDependencies, maxTimestamp)
            .stream()
            .map(EventProcessorStateDto::eventDefinitionId)
            .collect(ImmutableSet.toImmutableSet());
    // The timerange is processable only when every dependency has caught up.
    return processedIds.containsAll(processorDependencies);
}
// End-to-end check of the dependency gate: a timerange is only processable
// when every named dependency has state at or past the requested timestamp.
@Test
public void canProcessTimerange() {
    final DateTime now = DateTime.now(DateTimeZone.UTC);
    // "a" processed up to now, "b" up to now-1h, "c" up to now-2h.
    final EventProcessorStateDto stateDto1 = EventProcessorStateDto.builder()
            .eventDefinitionId("a")
            .minProcessedTimestamp(now.minusDays(1))
            .maxProcessedTimestamp(now)
            .build();
    final EventProcessorStateDto stateDto2 = EventProcessorStateDto.builder()
            .eventDefinitionId("b")
            .minProcessedTimestamp(now.minusDays(1))
            .maxProcessedTimestamp(now.minusHours(1))
            .build();
    final EventProcessorStateDto stateDto3 = EventProcessorStateDto.builder()
            .eventDefinitionId("c")
            .minProcessedTimestamp(now.minusDays(1))
            .maxProcessedTimestamp(now.minusHours(2))
            .build();

    // No state objects yet
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("a"))).isFalse();

    stateService.setState(stateDto1);
    stateService.setState(stateDto2);
    stateService.setState(stateDto3);

    // No state object has a processed-timerange end >= now + 1h
    assertThat(dependencyCheck.canProcessTimerange(now.plusHours(1), ImmutableSet.of("a"))).isFalse();

    // Only processor "a" has been processed at "now"
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("a"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("a", "b"))).isFalse();
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("a", "c"))).isFalse();
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("a", "b", "c"))).isFalse();
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("b"))).isFalse();
    assertThat(dependencyCheck.canProcessTimerange(now, ImmutableSet.of("c"))).isFalse();

    // Only processors "a" and "b" have been processed at now - 1h
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(1), ImmutableSet.of("a", "b"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(1), ImmutableSet.of("a", "c"))).isFalse();

    // Processors "a", "b" and "c" have been processed at now - 2h
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(2), ImmutableSet.of("a", "b", "c"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(2), ImmutableSet.of("a", "b"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(2), ImmutableSet.of("a", "c"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(2), ImmutableSet.of("a"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(2), ImmutableSet.of("b"))).isTrue();
    assertThat(dependencyCheck.canProcessTimerange(now.minusHours(2), ImmutableSet.of("c"))).isTrue();
}
@Override public void onIssueChanges(QGChangeEvent qualityGateEvent, Set<ChangedIssue> changedIssues) { Optional<EvaluatedQualityGate> evaluatedQualityGate = qualityGateEvent.getQualityGateSupplier().get(); Optional<Metric.Level> previousStatusOptional = qualityGateEvent.getPreviousStatus(); if (evaluatedQualityGate.isEmpty() || previousStatusOptional.isEmpty()) { return; } Metric.Level currentStatus = evaluatedQualityGate.get().getStatus(); Metric.Level previousStatus = previousStatusOptional.get(); if (previousStatus.getColorName().equals(currentStatus.getColorName())) { // QG status didn't change - no action return; } addQualityGateEventToProject(qualityGateEvent, currentStatus); }
// When the event carries no previous QG status, the handler must return early
// without touching the database.
@Test
public void onIssueChanges_givenEventWithNoPreviousStatus_doNotInteractWithDatabase() {
    EvaluatedQualityGate value = EvaluatedQualityGate.newBuilder()
            .setQualityGate(createDefaultQualityGate())
            .addEvaluatedCondition(createEvaluatedCondition())
            .setStatus(Metric.Level.ERROR)
            .build();
    Supplier<Optional<EvaluatedQualityGate>> qualityGateSupplier = () -> Optional.of(value);
    // Previous status is null -> the handler should short-circuit.
    QGChangeEvent qualityGateEvent = new QGChangeEvent(project, branch, analysis,
            projectConfiguration, null, qualityGateSupplier);
    underTest.onIssueChanges(qualityGateEvent, Set.of());
    verifyNoInteractions(dbClient);
}
/**
 * Returns this projection as nested index paths, one inner array per projected
 * field (a top-level index becomes a singleton path, e.g. {@code {1}}).
 */
public abstract int[][] toNestedIndexes();
// Top-level indexes must be wrapped into singleton paths; already-nested
// indexes must pass through unchanged.
@Test
void testToNestedIndexes() {
    assertThat(Projection.of(new int[] {1, 2, 3, 4}).toNestedIndexes())
            .isEqualTo(
                    new int[][] {new int[] {1}, new int[] {2}, new int[] {3}, new int[] {4}});
    assertThat(
                    Projection.of(new int[][] {new int[] {4}, new int[] {1, 3}, new int[] {2}})
                            .toNestedIndexes())
            .isEqualTo(new int[][] {new int[] {4}, new int[] {1, 3}, new int[] {2}});
}
/**
 * Invokes the wrapped Hive function with the given arguments after converting
 * them to their Hive object representations.
 *
 * @param args the Flink-provided argument values (var-args)
 * @throws HiveException if the underlying Hive function fails
 */
public void eval(Object... args) throws HiveException {
    // When the parameter is (Integer, Array[Double]), Flink calls udf.eval(Integer,
    // Array[Double]), which is not a problem.
    // But when the parameter is a single array, Flink calls udf.eval(Array[Double]);
    // at this point Java's var-args will cast Array[Double] to Array[Object] and let
    // it be Object... args, so we need to re-wrap it as a single argument.
    if (isArgsSingleArray) {
        args = new Object[] {args};
    }

    checkArgument(args.length == conversions.length);

    // Skip the conversion pass entirely when every converter is the identity.
    if (!allIdentityConverter) {
        for (int i = 0; i < args.length; i++) {
            args[i] = conversions[i].toHiveObject(args[i]);
        }
    }

    function.process(args);
}
// A single array argument must survive the var-args re-wrapping in eval()
// and explode into (position, value) rows.
@Test
public void testArray() throws Exception {
    Object[] constantArgs = new Object[] {null};
    DataType[] dataTypes = new DataType[] {DataTypes.ARRAY(DataTypes.INT())};
    HiveGenericUDTF udf = init(GenericUDTFPosExplode.class, constantArgs, dataTypes);
    udf.eval(new Integer[] {1, 2, 3});
    assertThat(collector.result)
            .isEqualTo(Arrays.asList(Row.of(0, 1), Row.of(1, 2), Row.of(2, 3)));
}
/**
 * Decides whether the installed SonarQube version is still "active" according
 * to the update-center data.
 *
 * <p>Rules implemented below: the current LTA version (patch ignored) is
 * always active; the past LTA version remains active for 6 months after the
 * initial release of the current LTA; any other version is active if it is at
 * least the release preceding the latest one (patch ignored).
 *
 * @throws IllegalStateException if the initial LTA release has no release date
 */
public boolean evaluateIfActiveVersion(UpdateCenter updateCenter) {
    Version installedVersion = Version.create(sonarQubeVersion.get().toString());
    if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getLtaVersion().getVersion()) == 0) {
        return true;
    }
    SortedSet<Release> allReleases = updateCenter.getSonar().getAllReleases();
    if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getPastLtaVersion().getVersion()) == 0) {
        Release initialLtaRelease = findInitialVersionOfMajorRelease(allReleases, updateCenter.getSonar().getLtaVersion().getVersion());
        Date initialLtaReleaseDate = initialLtaRelease.getDate();
        if (initialLtaReleaseDate == null) {
            throw new IllegalStateException("Initial Major release date is missing in releases");
        }
        // The initial release of the current LTA must be within the last 6 months.
        // NOTE(review): legacy Calendar/Date API; system2.now() keeps the clock
        // injectable for tests.
        Calendar c = Calendar.getInstance();
        c.setTime(new Date(system2.now()));
        c.add(Calendar.MONTH, -6);
        return initialLtaReleaseDate.after(c.getTime());
    } else {
        return compareWithoutPatchVersion(installedVersion, findPreviousReleaseIgnoringPatch(allReleases).getVersion()) >= 0;
    }
}
// A version matching the latest LTA (patch ignored) must always be active.
@Test
void evaluateIfActiveVersion_whenInstalledVersionIsLatestLta_shouldReturnActiveVersion() {
    when(updateCenter.getSonar().getAllReleases()).thenReturn(getReleases());
    when(sonarQubeVersion.get()).thenReturn(parse("9.9.2"));
    assertThat(underTest.evaluateIfActiveVersion(updateCenter)).isTrue();
}
/**
 * Extracts the output type information of a {@link MapFunction} from its input
 * type.
 *
 * <p>Delegates to the extended overload with no function name and the flag set
 * to {@code false}; with this variant, un-resolvable generics surface as an
 * exception rather than a MissingTypeInfo (presumably the boolean toggles
 * lenient mode — confirm against the delegated overload).
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
        MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
    return getMapReturnTypes(mapInterface, inType, null, false);
}
// Raw (non-generic) functions: lenient extraction yields MissingTypeInfo,
// while the strict two-arg overload throws InvalidTypesException.
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
void testFunctionWithMissingGenerics() {
    RichMapFunction function = new RichMapFunction() {
        private static final long serialVersionUID = 1L;

        @Override
        public String map(Object value) throws Exception {
            return null;
        }
    };

    TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(function, Types.STRING, "name", true);
    assertThat(ti).isInstanceOf(MissingTypeInfo.class);

    assertThatThrownBy(() -> TypeExtractor.getMapReturnTypes(function, Types.STRING))
            .isInstanceOf(InvalidTypesException.class);
}
/** Returns the function that converts values of type {@code T} into Beam {@link Row}s. */
public SerializableFunction<T, Row> getToRowFunction() {
    return toRowFunction;
}
// The generated toRow function must convert a proto message with map fields
// into the expected Row.
@Test
public void testMapProtoToRow() throws InvalidProtocolBufferException {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(MapPrimitive.getDescriptor());
    SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
    assertEquals(MAP_PRIMITIVE_ROW, toRow.apply(toDynamic(MAP_PRIMITIVE_PROTO)));
}
/**
 * Returns the raw path of the URI with its query string appended (as
 * {@code path?query}) when a query is present.
 *
 * @param uri the URI to extract from; {@code null} yields the empty string
 * @return raw path plus optional {@code ?query} suffix
 */
public static String getPathWithParams(final URI uri) {
    if (uri == null) {
        return StringUtils.EMPTY;
    }
    final String query = uri.getQuery();
    // Concatenation (not StringBuilder) deliberately mirrors the original
    // behavior for a null raw path.
    final String suffix = StringUtils.isEmpty(query) ? "" : "?" + query;
    return uri.getRawPath() + suffix;
}
// Path + query extraction for absolute and relative URIs, with and without a
// query string.
@Test
void getPathWithParams() {
    URI uri = UriUtils.createUri("https://example.com");
    assertNotNull(uri);
    String ret = UriUtils.getPathWithParams(uri);
    assertEquals("", ret);

    uri = UriUtils.createUri("https://example.com/path");
    assertNotNull(uri);
    ret = UriUtils.getPathWithParams(uri);
    assertEquals("/path", ret);

    uri = UriUtils.createUri("https://example.com/path?key=val");
    assertNotNull(uri);
    ret = UriUtils.getPathWithParams(uri);
    assertEquals("/path?key=val", ret);

    uri = UriUtils.createUri("/path?key=val");
    assertNotNull(uri);
    ret = UriUtils.getPathWithParams(uri);
    assertEquals("/path?key=val", ret);
}
/**
 * CLI entry point: loads the migration config referenced by the options and
 * runs the command against it.
 *
 * @return 0 on success, 1 if the config file is missing or cannot be loaded
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }

    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        LOGGER.error(e.getMessage());
        return 1;
    }

    // Delegate to the testable overload with real collaborators.
    return command(
        config,
        MigrationsUtil::getKsqlClient,
        getMigrationsDir(getConfigFile(), config),
        Clock.systemDefaultZone()
    );
}
// If querying the migrations metadata source fails, the command must return 1
// and must not attempt any writes.
@Test
public void shouldFailIfMetadataNotInitialized() throws Exception {
    // Given:
    command = PARSER.parse("-n");
    createMigrationFile(1, NAME, migrationsDir, COMMAND);
    when(sourceDescriptionCf.get())
        .thenThrow(new ExecutionException("Source not found", new RuntimeException()));

    // When:
    final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir,
        Clock.fixed(Instant.ofEpochMilli(1000), ZoneId.systemDefault()));

    // Then:
    assertThat(result, is(1));
    Mockito.verify(ksqlClient, times(0)).executeStatement(any(), any());
    Mockito.verify(ksqlClient, times(0)).insertInto(any(), any());
}
/**
 * Computes the websocket connection timeout for the given auth token.
 *
 * <p>Returns empty when the configured maximum is non-positive (timeouts
 * disabled). Otherwise returns the smaller of the configured maximum and the
 * token's remaining lifetime; if the lifetime cannot be determined (no
 * provider, no token, or a provider failure) the configured maximum is used.
 *
 * @param token             optional bearer token
 * @param ksqlConfig        source of the configured maximum timeout
 * @param authTokenProvider optional provider able to report token lifetimes
 * @return the timeout in milliseconds, or empty when disabled
 */
public Optional<Long> getTokenTimeout(
    final Optional<String> token,
    final KsqlConfig ksqlConfig,
    final Optional<KsqlAuthTokenProvider> authTokenProvider
) {
  final long maxTimeout = ksqlConfig.getLong(KsqlConfig.KSQL_WEBSOCKET_CONNECTION_MAX_TIMEOUT_MS);
  if (maxTimeout <= 0) {
    // Timeouts disabled by configuration.
    return Optional.empty();
  }
  if (token.isPresent() && authTokenProvider.isPresent()) {
    try {
      final String rawToken = StringUtils.removeStart(token.get(), BEARER);
      final long remainingLifetime =
          authTokenProvider.get().getLifetimeMs(rawToken) - clock.millis();
      return Optional.of(Math.min(remainingLifetime, maxTimeout));
    } catch (final Exception e) {
      // Fall through to the configured maximum on any provider failure.
      log.error(e.getMessage());
    }
  }
  return Optional.of(maxTimeout);
}
// A max timeout of zero disables websocket connection timeouts entirely.
@Test
public void shouldReturnEmptyWhenConfigSetToZero() {
    // Given:
    when(ksqlConfig.getLong(KsqlConfig.KSQL_WEBSOCKET_CONNECTION_MAX_TIMEOUT_MS)).thenReturn(0L);

    // Then:
    assertThat(authenticationUtil.getTokenTimeout(Optional.of(TOKEN), ksqlConfig,
        Optional.of(authTokenProvider)), equalTo(Optional.empty()));
}
/**
 * Looks up the value address stored under the composite (key1, key2) key.
 * Delegates to the base implementation.
 */
@Override
public long get(long key1, int key2) {
    return super.get0(key1, key2);
}
// After migrating to a new allocator, the old manager must hold no memory and
// the stored value must remain reachable through the map.
@Test
public void testMigrateTo() {
    final SlotAssignmentResult slot = insert(1, 2);
    mem.putLong(slot.address(), 3);
    final HeapMemoryManager mgr2 = new HeapMemoryManager(memMgr);
    hsa.migrateTo(mgr2.getAllocator());
    // All memory must have been released from the original manager.
    assertEquals(0, memMgr.getUsedMemory());
    final long newValueAddr = hsa.get(1, 2);
    assertEquals(3, mem.getLong(newValueAddr));
}
/** Returns the unique identifier of this alarm. */
@Override
public AlarmId id() {
    return id;
}
// A copy built from an existing alarm must compare equal and preserve id and
// source.
@Test
public void testId() {
    final DefaultAlarm a = generate();
    final DefaultAlarm b = new DefaultAlarm.Builder(a).build();
    assertEquals("id ignored in equals", a, b);
    assertEquals(ALARM_ID, a.id());
    assertEquals(ALARM_ID, b.id());
    assertEquals(ALARM_ENTITY_ID, b.source());
}
/**
 * Applies centralized configuration merging to the given map in place.
 *
 * @param decrypt whether encrypted values should be decrypted while merging
 * @param config  the configuration map to mutate
 */
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
    merge(decrypt, config);
}
// Placeholder values such as ${TEST.string} must be resolved in the map itself.
@Test
public void testMap_mergeApplied_mutatesInPlaceCorrectly() {
    Map<String, Object> testMap = new HashMap<>();
    testMap.put("key", "${TEST.string}");
    CentralizedManagement.mergeMap(true, testMap);
    Assert.assertEquals("test", testMap.get("key").toString());
}
/**
 * Builds a per-topic consumer configuration that applies to every topic whose
 * name matches the given pattern.
 *
 * @param topicsPattern pattern matched against topic names
 * @param priorityLevel consumer priority level for matching topics
 */
public static TopicConsumerConfigurationData ofTopicsPattern(@NonNull Pattern topicsPattern, int priorityLevel) {
    return of(new TopicNameMatcher.TopicsPattern(topicsPattern), priorityLevel);
}
// The configured topics pattern must drive topic-name matching (data-driven).
@Test(dataProvider = "topicNameMatch")
public void testTopicNameMatch(String topicName, boolean expectedMatch) {
    TopicConsumerConfigurationData topicConsumerConfigurationData = TopicConsumerConfigurationData
        .ofTopicsPattern(Pattern.compile("^foo$"), 1);
    assertThat(topicConsumerConfigurationData.getTopicNameMatcher().matches(topicName)).isEqualTo(expectedMatch);
}
static boolean canUpdate(int transactionLogLayer, int transactionLogIndex, int entryInfoLayer, int entryInfoIndex) { if (transactionLogLayer == entryInfoLayer) { // Must make sure to not update beyond the current index if (transactionLogIndex >= entryInfoIndex) { return true; } } if (transactionLogLayer > entryInfoLayer) { if (transactionLogIndex < entryInfoIndex) { return true; } } return false; }
@DisplayName("Tests that prevent updating a record that has been rolled-over")
@Test
void testCannotUpdateRolledOverRecord() {
    // The log entry is on a later layer but its index is not behind the
    // entry's index, so the record was rolled over and must not be updated.
    assertFalse(TransactionLog.canUpdate(1, 3, 0, 1));
    assertFalse(TransactionLog.canUpdate(1, 3, 0, 3));
}
/**
 * Creates a {@link Read} transform for Solace {@link Solace.Record}s using the
 * default record mapper, sender-timestamp based timestamps, and the default
 * deduplication and watermark-idle settings.
 */
public static Read<Solace.Record> read() {
    return new Read<Solace.Record>(
            Read.Configuration.<Solace.Record>builder()
                .setTypeDescriptor(TypeDescriptor.of(Solace.Record.class))
                .setParseFn(SolaceRecordMapper::map)
                .setTimestampFn(SENDER_TIMESTAMP_FUNCTION)
                .setDeduplicateRecords(DEFAULT_DEDUPLICATE_RECORDS)
                .setWatermarkIdleDurationThreshold(DEFAULT_WATERMARK_IDLE_DURATION_THRESHOLD));
}
// End-to-end read with custom type descriptor, parse function and timestamp
// function against a mocked Solace session.
@Test
public void testReadWithCoderAndParseFnAndTimestampFn() {
    // Broker that creates input data
    MockSessionService mockClientService =
        new MockSessionService(
            index -> {
              List<BytesXMLMessage> messages =
                  ImmutableList.of(
                      SolaceDataUtils.getBytesXmlMessage("payload_test0", "450"),
                      SolaceDataUtils.getBytesXmlMessage("payload_test1", "451"),
                      SolaceDataUtils.getBytesXmlMessage("payload_test2", "452"));
              return getOrNull(index, messages);
            },
            3);
    SessionServiceFactory fakeSessionServiceFactory = new MockSessionServiceFactory(mockClientService);

    // Expected data
    List<SimpleRecord> expected = new ArrayList<>();
    expected.add(new SimpleRecord("payload_test0", "450"));
    expected.add(new SimpleRecord("payload_test1", "451"));
    expected.add(new SimpleRecord("payload_test2", "452"));

    // Run the pipeline
    PCollection<SimpleRecord> events =
        pipeline.apply(
            "Read from Solace",
            SolaceIO.read(
                    TypeDescriptor.of(SimpleRecord.class),
                    input ->
                        new SimpleRecord(
                            new String(input.getBytes(), StandardCharsets.UTF_8),
                            input.getApplicationMessageId()),
                    input -> Instant.ofEpochMilli(1708100477061L))
                .from(Solace.Queue.fromName("queue"))
                .withSempClientFactory(MockSempClientFactory.getDefaultMock())
                .withSessionServiceFactory(fakeSessionServiceFactory)
                .withMaxNumConnections(1));

    // Assert results
    PAssert.that(events).containsInAnyOrder(expected);
    pipeline.run();
}
/**
 * Heuristically decides whether an unbalanced literal string has actually
 * ended by peeking at the next three bytes (they are rewound afterwards).
 *
 * @param bracesParameter the current open-brace balance; 0 means balanced
 * @return 0 if an end-of-string indicator follows (treat the string as
 *         closed), otherwise {@code bracesParameter} unchanged
 * @throws IOException if reading from the source fails
 */
private int checkForEndOfString(final int bracesParameter) throws IOException {
    if (bracesParameter == 0) {
        return 0;
    }
    // Check the next 3 bytes if available
    byte[] nextThreeBytes = new byte[3];
    int amountRead = source.read(nextThreeBytes);
    // Peek only: rewind whatever was consumed.
    if (amountRead > 0) {
        source.rewind(amountRead);
    }
    // Fewer than 3 bytes left: cannot decide, keep the current balance.
    if (amountRead < 3) {
        return bracesParameter;
    }
    // The following cases are valid indicators for the end of the string
    // 1. Next line contains another COSObject: CR + LF + '/'
    // 2. COSDictionary ends in the next line: CR + LF + '>'
    // 3. Next line contains another COSObject: LF + '/'
    // 4. COSDictionary ends in the next line: LF + '>'
    // 5. Next line contains another COSObject: CR + '/'
    // 6. COSDictionary ends in the next line: CR + '>'
    if (((isCR(nextThreeBytes[0]) || isLF(nextThreeBytes[0])) && (nextThreeBytes[1] == '/' || nextThreeBytes[1] == '>')) //
            || //
            (isCR(nextThreeBytes[0]) && isLF(nextThreeBytes[1]) && (nextThreeBytes[2] == '/' || nextThreeBytes[2] == '>')) //
    )
    {
        return 0;
    }
    return bracesParameter;
}
// Exercises checkForEndOfString indirectly: a string with an unbalanced '('
// is treated as terminated when ')' is followed by CR/LF/CRLF plus '/' or '>'.
@Test
void testCheckForEndOfString() throws IOException {
    // (Test)
    byte[] inputBytes = { 40, 84, 101, 115, 116, 41 };
    RandomAccessReadBuffer buffer = new RandomAccessReadBuffer(inputBytes);
    BaseParser baseParser = new COSParser(buffer);
    COSString cosString = baseParser.parseCOSString();
    assertEquals("Test", cosString.getString());

    String output = "(Test";
    // ((Test) + LF + "/ "
    inputBytes = new byte[] { '(', '(', 'T', 'e', 's', 't', ')', 10, '/', ' ' };
    buffer = new RandomAccessReadBuffer(inputBytes);
    baseParser = new COSParser(buffer);
    cosString = baseParser.parseCOSString();
    assertEquals(output, cosString.getString());
    // ((Test) + CR + "/ "
    inputBytes = new byte[] { '(', '(', 'T', 'e', 's', 't', ')', 13, '/', ' ' };
    buffer = new RandomAccessReadBuffer(inputBytes);
    baseParser = new COSParser(buffer);
    cosString = baseParser.parseCOSString();
    assertEquals(output, cosString.getString());
    // ((Test) + CR + LF + "/ "
    inputBytes = new byte[] { '(', '(', 'T', 'e', 's', 't', ')', 13, 10, '/' };
    buffer = new RandomAccessReadBuffer(inputBytes);
    baseParser = new COSParser(buffer);
    cosString = baseParser.parseCOSString();
    assertEquals(output, cosString.getString());
    // ((Test) + LF + "> "
    inputBytes = new byte[] { '(', '(', 'T', 'e', 's', 't', ')', 10, '>', ' ' };
    buffer = new RandomAccessReadBuffer(inputBytes);
    baseParser = new COSParser(buffer);
    cosString = baseParser.parseCOSString();
    assertEquals(output, cosString.getString());
    // ((Test) + CR + "> "
    inputBytes = new byte[] { '(', '(', 'T', 'e', 's', 't', ')', 13, '>', ' ' };
    buffer = new RandomAccessReadBuffer(inputBytes);
    baseParser = new COSParser(buffer);
    cosString = baseParser.parseCOSString();
    assertEquals(output, cosString.getString());
    // ((Test) + CR + LF + "> "
    inputBytes = new byte[] { '(', '(', 'T', 'e', 's', 't', ')', 13, 10, '>' };
    buffer = new RandomAccessReadBuffer(inputBytes);
    baseParser = new COSParser(buffer);
    cosString = baseParser.parseCOSString();
    assertEquals(output, cosString.getString());
}