focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public void close() throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } try { // close should have been called after all pending transactions // have been flushed & synced. // if already closed, just skip if (doubleBuf != null) { doubleBuf.close(); doubleBuf = null; } // remove any preallocated padding bytes from the transaction log. if (fc != null && fc.isOpen()) { fc.truncate(fc.position()); fc.close(); fc = null; } fp.close(); fp = null; } finally { IOUtils.cleanupWithLogger(LOG, fc, fp); doubleBuf = null; fc = null; fp = null; } fp = null; }
@Test public void testEditLogFileOutputStreamCloseClose() throws IOException { // close after a close should result in an IOE EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf, TEST_EDITS, 0); editLogStream.close(); try { editLogStream.close(); } catch (IOException ioe) { String msg = StringUtils.stringifyException(ioe); assertTrue(msg, msg.contains("Trying to use aborted output stream")); } }
/**
 * Fetches the session windows for the given key from the first underlying
 * store that has data for it.
 *
 * @param key the record key to look up; must not be {@code null}
 * @return an iterator over the matching windowed sessions, or an empty
 *     iterator when no underlying store holds data for the key
 * @throws InvalidStateStoreException if an underlying store has been
 *     closed or migrated to another instance
 */
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
    for (final ReadOnlySessionStore<K, V> store : stores) {
        try {
            final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
            if (!result.hasNext()) {
                // Nothing for this key here; release the iterator and try the next store.
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException ise) {
            // Re-throw with store context, preserving the original exception
            // as the cause so the full stack trace is not lost.
            throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
                    " and may have been migrated to another instance; " +
                    "please re-discover its location from the state metadata. " +
                    "Original error message: " + ise, ise);
        }
    }
    return KeyValueIterators.emptyIterator();
}
/**
 * Fetching a key that has no sessions must yield an empty iterator
 * rather than {@code null} or an exception.
 */
@Test
public void shouldReturnEmptyIteratorIfNoData() {
    final KeyValueIterator<Windowed<String>, Long> iterator = sessionStore.fetch("b");
    try {
        assertFalse(iterator.hasNext());
    } finally {
        iterator.close();
    }
}
/**
 * Builds the command-line argument list used to launch a Go function instance:
 * {@code [<binary>, -instance-conf, <json config>]}.
 *
 * <p>Optional fields are only copied into the {@link GoInstanceConfig} when
 * present/non-default so that the Go side's own defaults apply otherwise.
 *
 * @param instanceConfig          the function instance configuration to serialize
 * @param authConfig              client authentication/TLS settings, may be null
 * @param originalCodeFileName    path of the Go binary to execute (first arg)
 * @param pulsarServiceUrl        broker service URL, may be null
 * @param stateStorageServiceUrl  state storage (BK) service URL, may be null
 * @param pulsarWebServiceUrl     web service URL for the admin client, may be null
 * @param k8sRuntime              when true, the JSON config is single-quoted for k8s
 * @return the argument list to exec
 * @throws IOException if the function details or config cannot be serialized
 */
public static List<String> getGoInstanceCmd(InstanceConfig instanceConfig,
                                            AuthenticationConfig authConfig,
                                            String originalCodeFileName,
                                            String pulsarServiceUrl,
                                            String stateStorageServiceUrl,
                                            String pulsarWebServiceUrl,
                                            boolean k8sRuntime) throws IOException {
    final List<String> args = new LinkedList<>();
    GoInstanceConfig goInstanceConfig = new GoInstanceConfig();
    // pass the raw function details directly so that we don't need to assemble the `instanceConf.funcDetails`
    // manually in Go instance
    String functionDetails = JsonFormat.printer().omittingInsignificantWhitespace().print(instanceConfig.getFunctionDetails());
    goInstanceConfig.setFunctionDetails(functionDetails);
    // Identity and service endpoints: copied only when set.
    if (instanceConfig.getClusterName() != null) {
        goInstanceConfig.setClusterName(instanceConfig.getClusterName());
    }
    if (null != stateStorageServiceUrl) {
        goInstanceConfig.setStateStorageServiceUrl(stateStorageServiceUrl);
    }
    if (instanceConfig.isExposePulsarAdminClientEnabled() && StringUtils.isNotBlank(pulsarWebServiceUrl)) {
        goInstanceConfig.setPulsarWebServiceUrl(pulsarWebServiceUrl);
    }
    if (instanceConfig.getInstanceId() != 0) {
        goInstanceConfig.setInstanceID(instanceConfig.getInstanceId());
    }
    if (instanceConfig.getFunctionId() != null) {
        goInstanceConfig.setFuncID(instanceConfig.getFunctionId());
    }
    if (instanceConfig.getFunctionVersion() != null) {
        goInstanceConfig.setFuncVersion(instanceConfig.getFunctionVersion());
    }
    // Function-level settings from the protobuf FunctionDetails.
    if (instanceConfig.getFunctionDetails().getAutoAck()) {
        goInstanceConfig.setAutoAck(instanceConfig.getFunctionDetails().getAutoAck());
    }
    if (instanceConfig.getFunctionDetails().getTenant() != null) {
        goInstanceConfig.setTenant(instanceConfig.getFunctionDetails().getTenant());
    }
    if (instanceConfig.getFunctionDetails().getNamespace() != null) {
        goInstanceConfig.setNameSpace(instanceConfig.getFunctionDetails().getNamespace());
    }
    if (instanceConfig.getFunctionDetails().getName() != null) {
        goInstanceConfig.setName(instanceConfig.getFunctionDetails().getName());
    }
    if (instanceConfig.getFunctionDetails().getLogTopic() != null) {
        goInstanceConfig.setLogTopic(instanceConfig.getFunctionDetails().getLogTopic());
    }
    // Enums are transferred by their numeric protobuf value.
    if (instanceConfig.getFunctionDetails().getProcessingGuarantees() != null) {
        goInstanceConfig
                .setProcessingGuarantees(instanceConfig.getFunctionDetails().getProcessingGuaranteesValue());
    }
    if (instanceConfig.getFunctionDetails().getRuntime() != null) {
        goInstanceConfig.setRuntime(instanceConfig.getFunctionDetails().getRuntimeValue());
    }
    if (instanceConfig.getFunctionDetails().getSecretsMap() != null) {
        goInstanceConfig.setSecretsMap(instanceConfig.getFunctionDetails().getSecretsMap());
    }
    if (instanceConfig.getFunctionDetails().getUserConfig() != null) {
        goInstanceConfig.setUserConfig(instanceConfig.getFunctionDetails().getUserConfig());
    }
    if (instanceConfig.getFunctionDetails().getParallelism() != 0) {
        goInstanceConfig.setParallelism(instanceConfig.getFunctionDetails().getParallelism());
    }
    // Authentication/TLS: plugin and parameters only travel together.
    if (authConfig != null) {
        if (isNotBlank(authConfig.getClientAuthenticationPlugin())
                && isNotBlank(authConfig.getClientAuthenticationParameters())) {
            goInstanceConfig.setClientAuthenticationPlugin(authConfig.getClientAuthenticationPlugin());
            goInstanceConfig.setClientAuthenticationParameters(authConfig.getClientAuthenticationParameters());
        }
        goInstanceConfig.setTlsAllowInsecureConnection(
                authConfig.isTlsAllowInsecureConnection());
        goInstanceConfig.setTlsHostnameVerificationEnable(
                authConfig.isTlsHostnameVerificationEnable());
        if (isNotBlank(authConfig.getTlsTrustCertsFilePath())) {
            goInstanceConfig.setTlsTrustCertsFilePath(
                    authConfig.getTlsTrustCertsFilePath());
        }
    }
    if (instanceConfig.getMaxBufferedTuples() != 0) {
        goInstanceConfig.setMaxBufTuples(instanceConfig.getMaxBufferedTuples());
    }
    if (pulsarServiceUrl != null) {
        goInstanceConfig.setPulsarServiceURL(pulsarServiceUrl);
    }
    // Source (input) settings.
    if (instanceConfig.getFunctionDetails().getSource().getCleanupSubscription()) {
        goInstanceConfig
                .setCleanupSubscription(instanceConfig.getFunctionDetails().getSource().getCleanupSubscription());
    }
    if (instanceConfig.getFunctionDetails().getSource().getSubscriptionName() != null) {
        goInstanceConfig.setSubscriptionName(instanceConfig.getFunctionDetails().getSource().getSubscriptionName());
    }
    goInstanceConfig.setSubscriptionPosition(
            instanceConfig.getFunctionDetails().getSource().getSubscriptionPosition().getNumber());
    if (instanceConfig.getFunctionDetails().getSource().getInputSpecsMap() != null) {
        Map<String, String> sourceInputSpecs = new HashMap<>();
        for (Map.Entry<String, Function.ConsumerSpec> entry
                : instanceConfig.getFunctionDetails().getSource().getInputSpecsMap().entrySet()) {
            String topic = entry.getKey();
            Function.ConsumerSpec spec = entry.getValue();
            sourceInputSpecs.put(topic, JsonFormat.printer().omittingInsignificantWhitespace().print(spec));
            // NOTE(review): sourceSpecsTopic ends up holding the LAST topic in map
            // iteration order when there are multiple input specs — confirm intended.
            goInstanceConfig.setSourceSpecsTopic(topic);
        }
        goInstanceConfig.setSourceInputSpecs(sourceInputSpecs);
    }
    if (instanceConfig.getFunctionDetails().getSource().getTimeoutMs() != 0) {
        goInstanceConfig.setTimeoutMs(instanceConfig.getFunctionDetails().getSource().getTimeoutMs());
    }
    // Sink (output), resources and retry settings.
    if (instanceConfig.getFunctionDetails().getSink().getTopic() != null) {
        goInstanceConfig.setSinkSpecsTopic(instanceConfig.getFunctionDetails().getSink().getTopic());
    }
    if (instanceConfig.getFunctionDetails().getResources().getCpu() != 0) {
        goInstanceConfig.setCpu(instanceConfig.getFunctionDetails().getResources().getCpu());
    }
    if (instanceConfig.getFunctionDetails().getResources().getRam() != 0) {
        goInstanceConfig.setRam(instanceConfig.getFunctionDetails().getResources().getRam());
    }
    if (instanceConfig.getFunctionDetails().getResources().getDisk() != 0) {
        goInstanceConfig.setDisk(instanceConfig.getFunctionDetails().getResources().getDisk());
    }
    if (instanceConfig.getFunctionDetails().getRetryDetails().getDeadLetterTopic() != null) {
        goInstanceConfig
                .setDeadLetterTopic(instanceConfig.getFunctionDetails().getRetryDetails().getDeadLetterTopic());
    }
    if (instanceConfig.getFunctionDetails().getRetryDetails().getMaxMessageRetries() != 0) {
        goInstanceConfig
                .setMaxMessageRetries(instanceConfig.getFunctionDetails().getRetryDetails().getMaxMessageRetries());
    }
    if (instanceConfig.hasValidMetricsPort()) {
        goInstanceConfig.setMetricsPort(instanceConfig.getMetricsPort());
    }
    goInstanceConfig.setKillAfterIdleMs(0);
    goInstanceConfig.setPort(instanceConfig.getPort());

    // Parse the contents of goInstanceConfig into json form string
    ObjectMapper objectMapper = ObjectMapperFactory.getMapper().getObjectMapper();
    String configContent = objectMapper.writeValueAsString(goInstanceConfig);
    args.add(originalCodeFileName);
    args.add("-instance-conf");
    // Single-quote the JSON blob for Kubernetes command lines.
    if (k8sRuntime) {
        args.add("'" + configContent + "'");
    } else {
        args.add(configContent);
    }
    return args;
}
/**
 * End-to-end check of {@code RuntimeUtils.getGoInstanceCmd}: builds a fully
 * populated InstanceConfig, runs the command builder for both plain and k8s
 * runtimes, parses the emitted JSON config back, and asserts every field.
 */
@Test(dataProvider = "k8sRuntime")
public void getGoInstanceCmd(boolean k8sRuntime) throws IOException {
    HashMap<String, String> goInstanceConfig;
    // Instance-level settings.
    InstanceConfig instanceConfig = new InstanceConfig();
    instanceConfig.setClusterName("kluster");
    instanceConfig.setInstanceId(3000);
    instanceConfig.setFunctionId("func-7734");
    instanceConfig.setFunctionVersion("1.0.0");
    instanceConfig.setMaxBufferedTuples(5);
    instanceConfig.setPort(1337);
    instanceConfig.setMetricsPort(60000);
    // Authentication/TLS settings expected to be forwarded verbatim.
    AuthenticationConfig authConfig = AuthenticationConfig.builder()
            .clientAuthenticationPlugin("org.apache.pulsar.client.impl.auth.AuthenticationToken")
            .clientAuthenticationParameters("file:///secret/token.jwt")
            .tlsTrustCertsFilePath("/secret/ca.cert.pem")
            .tlsHostnameVerificationEnable(true)
            .tlsAllowInsecureConnection(false)
            .build();
    JSONObject userConfig = new JSONObject();
    userConfig.put("word-of-the-day", "der Weltschmerz");
    JSONObject secretsMap = new JSONObject();
    secretsMap.put("secret", "cake is a lie");
    // Source/retry/resource protobuf specs.
    Function.SourceSpec sources = Function.SourceSpec.newBuilder()
            .setCleanupSubscription(true)
            .setSubscriptionName("go-func-sub")
            .setTimeoutMs(500)
            .putInputSpecs("go-func-input", Function.ConsumerSpec.newBuilder().setIsRegexPattern(false).build())
            .build();
    Function.RetryDetails retryDetails = Function.RetryDetails.newBuilder()
            .setDeadLetterTopic("go-func-deadletter")
            .setMaxMessageRetries(1)
            .build();
    Function.Resources resources = Function.Resources.newBuilder()
            .setCpu(2)
            .setDisk(1024)
            .setRam(32)
            .build();
    Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder()
            .setAutoAck(true)
            .setTenant("public")
            .setNamespace("default")
            .setName("go-func")
            .setLogTopic("go-func-log")
            .setProcessingGuarantees(Function.ProcessingGuarantees.ATLEAST_ONCE)
            .setRuntime(Function.FunctionDetails.Runtime.GO)
            .setSecretsMap(secretsMap.toJSONString())
            .setParallelism(1)
            .setSource(sources)
            .setRetryDetails(retryDetails)
            .setResources(resources)
            .setUserConfig(userConfig.toJSONString())
            .build();
    instanceConfig.setFunctionDetails(functionDetails);
    instanceConfig.setExposePulsarAdminClientEnabled(true);
    List<String> commands = RuntimeUtils.getGoInstanceCmd(instanceConfig, authConfig, "config",
            "pulsar://localhost:6650", "bk://localhost:4181", "http://localhost:8080", k8sRuntime);
    // For k8s the JSON arg is single-quoted; strip the quotes before parsing.
    if (k8sRuntime) {
        goInstanceConfig = new ObjectMapper().readValue(commands.get(2).replaceAll("^\'|\'$", ""), HashMap.class);
    } else {
        goInstanceConfig = new ObjectMapper().readValue(commands.get(2), HashMap.class);
    }
    // Command shape: <binary> -instance-conf <json>.
    Assert.assertEquals(commands.toArray().length, 3);
    Assert.assertEquals(commands.get(0), "config");
    Assert.assertEquals(commands.get(1), "-instance-conf");
    // Field-by-field verification of the emitted JSON config.
    Assert.assertEquals(goInstanceConfig.get("maxBufTuples"), 5);
    Assert.assertEquals(goInstanceConfig.get("maxMessageRetries"), 1);
    Assert.assertEquals(goInstanceConfig.get("killAfterIdleMs"), 0);
    Assert.assertEquals(goInstanceConfig.get("parallelism"), 1);
    Assert.assertEquals(goInstanceConfig.get("className"), "");
    Assert.assertEquals(goInstanceConfig.get("sourceSpecsTopic"), "go-func-input");
    Assert.assertEquals(goInstanceConfig.get("secretsMap"), secretsMap.toString());
    Assert.assertEquals(goInstanceConfig.get("sourceSchemaType"), "");
    Assert.assertEquals(goInstanceConfig.get("sinkSpecsTopic"), "");
    Assert.assertEquals(goInstanceConfig.get("clusterName"), "kluster");
    Assert.assertEquals(goInstanceConfig.get("nameSpace"), "default");
    Assert.assertEquals(goInstanceConfig.get("receiverQueueSize"), 0);
    Assert.assertEquals(goInstanceConfig.get("tenant"), "public");
    Assert.assertEquals(goInstanceConfig.get("ram"), 32);
    Assert.assertEquals(goInstanceConfig.get("logTopic"), "go-func-log");
    // Enums travel as their protobuf numeric values (ATLEAST_ONCE == 0, GO == 3).
    Assert.assertEquals(goInstanceConfig.get("processingGuarantees"), 0);
    Assert.assertEquals(goInstanceConfig.get("autoAck"), true);
    Assert.assertEquals(goInstanceConfig.get("regexPatternSubscription"), false);
    Assert.assertEquals(goInstanceConfig.get("pulsarServiceURL"), "pulsar://localhost:6650");
    Assert.assertEquals(goInstanceConfig.get("stateStorageServiceUrl"), "bk://localhost:4181");
    Assert.assertEquals(goInstanceConfig.get("pulsarWebServiceUrl"), "http://localhost:8080");
    Assert.assertEquals(goInstanceConfig.get("runtime"), 3);
    Assert.assertEquals(goInstanceConfig.get("cpu"), 2.0);
    Assert.assertEquals(goInstanceConfig.get("funcID"), "func-7734");
    Assert.assertEquals(goInstanceConfig.get("funcVersion"), "1.0.0");
    Assert.assertEquals(goInstanceConfig.get("disk"), 1024);
    Assert.assertEquals(goInstanceConfig.get("instanceID"), 3000);
    Assert.assertEquals(goInstanceConfig.get("cleanupSubscription"), true);
    Assert.assertEquals(goInstanceConfig.get("port"), 1337);
    Assert.assertEquals(goInstanceConfig.get("subscriptionType"), 0);
    Assert.assertEquals(goInstanceConfig.get("timeoutMs"), 500);
    Assert.assertEquals(goInstanceConfig.get("subscriptionName"), "go-func-sub");
    Assert.assertEquals(goInstanceConfig.get("name"), "go-func");
    Assert.assertEquals(goInstanceConfig.get("expectedHealthCheckInterval"), 0);
    Assert.assertEquals(goInstanceConfig.get("deadLetterTopic"), "go-func-deadletter");
    Assert.assertEquals(goInstanceConfig.get("userConfig"), userConfig.toString());
    Assert.assertEquals(goInstanceConfig.get("metricsPort"), 60000);
    Assert.assertEquals(goInstanceConfig.get("clientAuthenticationPlugin"),
            "org.apache.pulsar.client.impl.auth.AuthenticationToken");
    Assert.assertEquals(goInstanceConfig.get("clientAuthenticationParameters"), "file:///secret/token.jwt");
    Assert.assertEquals(goInstanceConfig.get("tlsTrustCertsFilePath"), "/secret/ca.cert.pem");
    Assert.assertEquals(goInstanceConfig.get("tlsHostnameVerificationEnable"), true);
    Assert.assertEquals(goInstanceConfig.get("tlsAllowInsecureConnection"), false);
}
public static Result find(List<Path> files, Consumer<LogEvent> logger) { List<String> mainClasses = new ArrayList<>(); for (Path file : files) { // Makes sure classFile is valid. if (!Files.exists(file)) { logger.accept(LogEvent.debug("MainClassFinder: " + file + " does not exist; ignoring")); continue; } if (!Files.isRegularFile(file)) { logger.accept( LogEvent.debug("MainClassFinder: " + file + " is not a regular file; skipping")); continue; } if (!file.toString().endsWith(".class")) { logger.accept( LogEvent.debug("MainClassFinder: " + file + " is not a class file; skipping")); continue; } MainClassVisitor mainClassVisitor = new MainClassVisitor(); try (InputStream classFileInputStream = Files.newInputStream(file)) { ClassReader reader = new ClassReader(classFileInputStream); reader.accept(mainClassVisitor, 0); if (mainClassVisitor.visitedMainClass) { mainClasses.add(reader.getClassName().replace('/', '.')); } } catch (IllegalArgumentException ex) { throw new UnsupportedOperationException( "Check the full stace trace, and if the root cause is from ASM ClassReader about " + "unsupported class file version, see " + "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md" + "#i-am-seeing-unsupported-class-file-major-version-when-building", ex); } catch (ArrayIndexOutOfBoundsException ignored) { // Not a valid class file (thrown by ClassReader if it reads an invalid format) logger.accept(LogEvent.warn("Invalid class file found: " + file)); } catch (IOException ignored) { // Could not read class file. logger.accept(LogEvent.warn("Could not read file: " + file)); } } if (mainClasses.size() == 1) { // Valid class found. return Result.success(mainClasses.get(0)); } if (mainClasses.isEmpty()) { // No main class found anywhere. return Result.mainClassNotFound(); } // More than one main class found. return Result.multipleMainClasses(mainClasses); }
/** A directory containing no main class must report MAIN_CLASS_NOT_FOUND. */
@Test
public void testFindMainClass_noClass() throws URISyntaxException, IOException {
  Path rootDirectory =
      Paths.get(Resources.getResource("core/class-finder-tests/no-main").toURI());
  List<Path> classFiles = new DirectoryWalker(rootDirectory).walk();
  MainClassFinder.Result result = MainClassFinder.find(classFiles, logEventConsumer);
  Assert.assertEquals(Result.Type.MAIN_CLASS_NOT_FOUND, result.getType());
}
public static FileCollection generatePathingJar(final Project project, final String taskName, final FileCollection classpath, boolean alwaysUsePathingJar) throws IOException { //There is a bug in the Scala nsc compiler that does not parse the dependencies of JARs in the JAR manifest //As such, we disable pathing for any libraries compiling docs for Scala resources if (!alwaysUsePathingJar && !classpath.filter(f -> f.getAbsolutePath().contains("restli-tools-scala")).isEmpty()) { LOG.info("Compiling Scala resource classes. Disabling pathing jar for " + taskName + " to avoid breaking Scala compilation"); return classpath; } //We extract the classpath from the target task here, in the configuration phase //Note that we don't invoke getFiles() here because that would trigger dependency resolution in configuration phase FileCollection filteredClasspath = classpath.filter(f -> !f.isDirectory()); File destinationDir = new File(project.getBuildDir(), taskName); destinationDir.mkdirs(); File pathingJarPath = new File(destinationDir, project.getName() + "-pathing.jar"); OutputStream pathingJar = new FileOutputStream(pathingJarPath); //Classpath manifest does not support directories and needs to contain relative paths String cp = ClasspathManifest.relativeClasspathManifest(destinationDir, filteredClasspath.getFiles()); //Create the JAR Manifest manifest = new Manifest(); manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0"); manifest.getMainAttributes().put(Attributes.Name.CLASS_PATH, cp); JarOutputStream jarOutputStream = new JarOutputStream(pathingJar, manifest); jarOutputStream.close(); return classpath.filter(File::isDirectory).plus(project.files(pathingJarPath)); }
@Test public void testCreatesGeneratesPathingJar() throws IOException { //setup createTempDir(); Project project = ProjectBuilder.builder().withProjectDir(temp).build(); String taskName = "myTaskName"; project.getBuildDir().mkdir(); System.out.println(project.getBuildDir().getAbsolutePath()); File tempFile = new File(project.getBuildDir(), "temp1.class"); GFileUtils.touch(tempFile); FileCollection files = project.files(tempFile); //when PathingJarUtil.generatePathingJar(project, taskName, files, true); File pathingJar = new File(project.getBuildDir(), taskName + '/' + project.getName() + "-pathing.jar"); assertTrue(pathingJar.exists()); JarInputStream jarStream = new JarInputStream(new FileInputStream(pathingJar)); Manifest manifest = jarStream.getManifest(); assertTrue(manifest.getMainAttributes().getValue(Attributes.Name.CLASS_PATH).contains("temp1.class")); cleanupTempDir(); }
public static Note mergeNotes(List<Note> notes, boolean keepMergedNotes) { boolean locked = false; ArrayList<Attachment> attachments = new ArrayList<>(); String reminder = null; String reminderRecurrenceRule = null; Double latitude = null; Double longitude = null; Note mergedNote = new Note(); mergedNote.setTitle(notes.get(0).getTitle()); mergedNote.setArchived(notes.get(0).isArchived()); mergedNote.setCategory(notes.get(0).getCategory()); StringBuilder content = new StringBuilder(); // Just first note title must not be included into the content boolean includeTitle = false; for (Note note : notes) { appendContent(note, content, includeTitle); locked = locked || note.isLocked(); String currentReminder = note.getAlarm(); if (!StringUtils.isEmpty(currentReminder) && reminder == null) { reminder = currentReminder; reminderRecurrenceRule = note.getRecurrenceRule(); } latitude = ObjectUtils.defaultIfNull(latitude, note.getLatitude()); longitude = ObjectUtils.defaultIfNull(longitude, note.getLongitude()); addAttachments(keepMergedNotes, note, attachments); includeTitle = true; } mergedNote.setContent(content.toString()); mergedNote.setLocked(locked); mergedNote.setAlarm(reminder); mergedNote.setRecurrenceRule(reminderRecurrenceRule); mergedNote.setLatitude(latitude); mergedNote.setLongitude(longitude); mergedNote.setAttachmentsList(attachments); return mergedNote; }
/**
 * Merging three notes keeps the first note's title, concatenates all
 * contents, and inserts exactly two separators.
 */
@Test
public void mergeNotes() {
    final int notesNumber = 3;
    var sourceNotes = new ArrayList<Note>();
    for (int i = 0; i < notesNumber; i++) {
        Note note = new Note();
        note.setTitle("Merged note " + i + " title");
        note.setContent("Merged note " + i + " content");
        sourceNotes.add(note);
    }

    var merged = NotesHelper.mergeNotes(sourceNotes, false);

    assertNotNull(merged);
    assertEquals("Merged note 0 title", merged.getTitle());
    for (int i = 0; i < notesNumber; i++) {
        assertTrue(merged.getContent().contains("Merged note " + i + " content"));
    }
    // N notes are joined with N-1 separators.
    assertEquals(StringUtils.countMatches(merged.getContent(), MERGED_NOTES_SEPARATOR), 2);
}
/**
 * Lists the given directory, scoping the request to the directory's key
 * prefix. The container root itself uses an empty prefix.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final String prefix;
    if (containerService.isContainer(directory)) {
        prefix = StringUtils.EMPTY;
    }
    else {
        prefix = containerService.getKey(directory) + Path.DELIMITER;
    }
    return this.list(directory, listener, prefix);
}
@Test public void testListFile() throws Exception { final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); final String name = new AlphanumericRandomStringService().random(); final Path file = new SwiftTouchFeature(session, new SwiftRegionService(session)).touch(new Path(container, name, EnumSet.of(Path.Type.file)), new TransferStatus()); try { new SwiftObjectListService(session).list(new Path(container, name, EnumSet.of(Path.Type.directory)), new DisabledListProgressListener()); fail(); } catch(NotfoundException e) { // Expected } new SwiftDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override @SuppressWarnings("rawtypes") public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) { final long timestamp = clock.getTime() / 1000; // oh it'd be lovely to use Java 7 here try { graphite.connect(); for (Map.Entry<String, Gauge> entry : gauges.entrySet()) { reportGauge(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Counter> entry : counters.entrySet()) { reportCounter(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Histogram> entry : histograms.entrySet()) { reportHistogram(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Meter> entry : meters.entrySet()) { reportMetered(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Timer> entry : timers.entrySet()) { reportTimer(entry.getKey(), entry.getValue(), timestamp); } graphite.flush(); } catch (IOException e) { LOGGER.warn("Unable to report to Graphite", graphite, e); } finally { try { graphite.close(); } catch (IOException e1) { LOGGER.warn("Error closing Graphite", graphite, e1); } } }
/**
 * A counter must be reported as "&lt;prefix&gt;.&lt;name&gt;.count" between
 * connect() and flush()/close(), with no other Graphite interactions.
 */
@Test
public void reportsCounters() throws Exception {
    final Counter requestCounter = mock(Counter.class);
    when(requestCounter.getCount()).thenReturn(100L);

    reporter.report(map(), map("counter", requestCounter), map(), map(), map());

    // Verify the exact connect -> send -> flush -> close lifecycle.
    final InOrder callOrder = inOrder(graphite);
    callOrder.verify(graphite).connect();
    callOrder.verify(graphite).send("prefix.counter.count", "100", timestamp);
    callOrder.verify(graphite).flush();
    callOrder.verify(graphite).close();
    verifyNoMoreInteractions(graphite);
}
/**
 * @return {@code true} if this exception was flagged as severe when constructed
 */
public boolean isSevere() {
  return this.severe;
}
/** The (message, cause, severe) constructor must retain all three values. */
@Test
public void testThrowableMessageCtor() {
  Throwable cause = mock( Throwable.class );
  exception = new LifecycleException( "message", cause, true );
  assertEquals( cause, exception.getCause() );
  assertEquals( "message", exception.getMessage() );
  assertTrue( exception.isSevere() );
}
/**
 * Exposes the root nodes of the projection field tree for tests.
 *
 * @return the set of root {@code PartitionFieldNode}s
 */
@VisibleForTesting
Set<PartitionFieldNode> getRoots() {
  return this.roots;
}
/**
 * Verifies that PartitionProjectionEvaluator builds the expected field tree
 * from the requested projection fields: nested fields are merged under a
 * shared root (e.g. everything under "sd"), multi-valued fields are flagged,
 * and the JDO id columns (PART_ID, SD_ID, CD_ID, SERDE_ID) are always added.
 */
@Test
public void testPartitionFieldTree() throws MetaException {
  PersistenceManager mockPm = Mockito.mock(PersistenceManager.class);
  // Capacity hint corrected: six projection fields are added (was 2).
  List<String> projectionFields = new ArrayList<>(6);
  projectionFields.add("sd.location");
  projectionFields.add("sd.parameters");
  projectionFields.add("createTime");
  projectionFields.add("sd.serdeInfo.serializationLib");
  projectionFields.add("sd.cols");
  projectionFields.add("parameters");
  PartitionProjectionEvaluator projectionEvaluator =
      new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null);
  Set<PartitionFieldNode> roots = projectionEvaluator.getRoots();

  // Build the expected tree by hand.
  Set<PartitionFieldNode> expected = new HashSet<>();
  PartitionFieldNode sdNode = new PartitionFieldNode("sd");
  sdNode.addChild(new PartitionFieldNode("sd.location"));
  sdNode.addChild(new PartitionFieldNode("sd.parameters", true));
  // "sd.cols" expands to its name/type/comment sub-fields.
  PartitionFieldNode sdColsNodes = new PartitionFieldNode("sd.cols", true);
  sdColsNodes.addChild(new PartitionFieldNode("sd.cols.name", true));
  sdColsNodes.addChild(new PartitionFieldNode("sd.cols.type", true));
  sdColsNodes.addChild(new PartitionFieldNode("sd.cols.comment", true));
  sdNode.addChild(sdColsNodes);
  PartitionFieldNode serdeNode = new PartitionFieldNode("sd.serdeInfo");
  serdeNode.addChild(new PartitionFieldNode("sd.serdeInfo.serializationLib"));
  sdNode.addChild(serdeNode);
  expected.add(sdNode);
  expected.add(new PartitionFieldNode("parameters", true));
  expected.add(new PartitionFieldNode("createTime"));
  // Id columns are always included regardless of the requested fields.
  expected.add(new PartitionFieldNode("PART_ID"));
  expected.add(new PartitionFieldNode("SD_ID"));
  expected.add(new PartitionFieldNode("CD_ID"));
  expected.add(new PartitionFieldNode("SERDE_ID"));
  compare(expected, roots);
}
/**
 * Delegates the PRIORITY frame write directly to the wrapped frame writer.
 */
@Override
public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId, int streamDependency,
        short weight, boolean exclusive, ChannelPromise promise) {
    final ChannelFuture writeFuture =
            frameWriter.writePriority(ctx, streamId, streamDependency, weight, exclusive, promise);
    return writeFuture;
}
/**
 * PRIORITY frames are still allowed after a GOAWAY has been received; the
 * write must be forwarded unchanged to the underlying frame writer.
 */
@Test
public void priorityWriteAfterGoAwayShouldSucceed() throws Exception {
    createStream(STREAM_ID, false);
    goAwayReceived(Integer.MAX_VALUE);

    ChannelPromise writePromise = newPromise();
    encoder.writePriority(ctx, STREAM_ID, 0, (short) 255, true, writePromise);

    verify(writer).writePriority(eq(ctx), eq(STREAM_ID), eq(0), eq((short) 255), eq(true), eq(writePromise));
}
/**
 * Always {@code true}: this metadata can be saved.
 */
@Override
public boolean canSave() {
  return true;
}
/** canSave() must unconditionally report true. */
@Test
public void testCanSave() {
  final boolean saveable = meta.canSave();
  assertTrue( saveable );
}
/**
 * Returns the {@code DoFnSignature} for the given {@code DoFn} subclass,
 * parsing it on first request and serving the cached value thereafter.
 *
 * @param fn the {@code DoFn} subclass to analyze
 * @return the (possibly cached) parsed signature
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
  // Parse-once semantics via computeIfAbsent; atomicity under concurrent
  // callers depends on signatureCache's map type — presumed concurrent,
  // confirm at the field declaration.
  return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
/**
 * A @StartBundle method declaring all three supported parameters
 * (StartBundleContext, BundleFinalizer, PipelineOptions) must be parsed
 * into extra parameters in declaration order.
 */
@Test
public void testStartBundleWithAllParameters() throws Exception {
  DoFnSignature sig =
      DoFnSignatures.getSignature(
          new DoFn<String, String>() {
            @ProcessElement
            public void processElement() {}

            @StartBundle
            public void startBundle(
                StartBundleContext context,
                BundleFinalizer bundleFinalizer,
                PipelineOptions options) {}
          }.getClass());
  assertThat(sig.startBundle().extraParameters().size(), equalTo(3));
  // Parameters must be reported in the order they were declared.
  assertThat(
      sig.startBundle().extraParameters().get(0),
      instanceOf(StartBundleContextParameter.class));
  assertThat(
      sig.startBundle().extraParameters().get(1),
      instanceOf(BundleFinalizerParameter.class));
  assertThat(
      sig.startBundle().extraParameters().get(2),
      instanceOf(PipelineOptionsParameter.class));
}
/**
 * Returns the 1-based position of {@code str} within {@code args}, or 0 when
 * the string is not present or either argument is null (mirrors SQL FIELD()).
 */
@Udf
public int field(
    @UdfParameter final String str,
    @UdfParameter final String... args
) {
  if (str == null || args == null) {
    return 0;
  }

  int position = 1;
  for (final String candidate : args) {
    if (str.equals(candidate)) {
      return position;
    }
    position++;
  }
  // Not found.
  return 0;
}
@Test public void shouldNotFindMissing() { // When: final int pos = field.field("missing", "hello", "world"); // Then: assertThat(pos, equalTo(0)); }
/**
 * Resolves the canonical path of the given file.
 *
 * <p>Any {@link IOException} from the filesystem is rethrown as a plain
 * {@link RuntimeException} (callers rely on that exact type) with the
 * original exception preserved as the cause.
 *
 * @param workDir the file whose canonical path is wanted
 * @return the canonical path string
 */
public static String getCanonicalPath(File workDir) {
    final String canonical;
    try {
        canonical = workDir.getCanonicalPath();
    } catch (IOException cause) {
        throw new RuntimeException(cause);
    }
    return canonical;
}
/**
 * getCanonicalPath delegates to File#getCanonicalPath on success and wraps
 * an IOException in a RuntimeException (exact type) keeping it as the cause.
 */
@Test
void shouldReturnCanonicalPath() throws IOException {
    // Happy path: delegates straight to File#getCanonicalPath.
    assertThat(FileUtil.getCanonicalPath(folder)).isEqualTo(folder.getCanonicalPath());

    // Failure path: the IOException must surface wrapped in a RuntimeException.
    File failingFile = spy(new File("/xyz/non-existent-file"));
    IOException ioFailure = new IOException("Failed to build the canonical path");
    doThrow(ioFailure).when(failingFile).getCanonicalPath();
    assertThatThrownBy(() -> FileUtil.getCanonicalPath(failingFile))
            .isExactlyInstanceOf(RuntimeException.class)
            .hasCause(ioFailure);
}
/**
 * Derives an annotated cluster state from the node states observed in
 * {@code params}: computes each node's effective state, applies group
 * availability rules, marks the whole cluster DOWN when warranted, and
 * infers the distribution bit count.
 */
static AnnotatedClusterState generatedStateFrom(final Params params) {
    final ContentCluster cluster = params.cluster;
    final ClusterState state = ClusterState.emptyState();
    final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();

    // Seed the state with each node's effective (possibly adjusted) state,
    // collecting per-node reasons along the way.
    for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
        state.setNodeState(nodeInfo.getNode(), computeEffectiveNodeState(nodeInfo, params, nodeStateReasons));
    }

    takeDownGroupsWithTooLowAvailability(state, nodeStateReasons, params);

    // If any cluster-level reason applies, the whole cluster goes DOWN.
    final Optional<ClusterStateReason> downReason = clusterDownReason(state, params);
    downReason.ifPresent(reason -> state.setClusterState(State.DOWN));

    state.setDistributionBits(inferDistributionBitCount(cluster, state, params));
    return new AnnotatedClusterState(state, downReason, nodeStateReasons);
}
/**
 * With only 1 of 3 storage nodes up (ratio 1/3 below the configured 0.5
 * minimum), the generated state must mark the whole cluster DOWN and report
 * TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO as the cluster-level reason.
 */
@Test
void cluster_down_if_less_than_min_ratio_of_storage_nodes_available() {
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportStorageNodeState(0, State.DOWN)
            .reportStorageNodeState(2, State.DOWN);
    final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfStorageNodesUp(0.5);
    // TODO de-dupe a lot of these tests?
    final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
    // "storage:2" — presumably the trailing DOWN node 2 truncates the storage
    // node count in the serialized form; confirm against the state format.
    assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:2 .0.s:d"));
    assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO)));
}
/**
 * SQL function line_locate_point: the location of the closest point on the
 * line to the given point, as a fraction [0, 1] of the total 2D line length.
 * Returns SQL NULL when either input geometry is empty.
 */
@SqlNullable
@Description("Returns a float between 0 and 1 representing the location of the closest point on the LineString to the given Point, as a fraction of total 2d line length.")
@ScalarFunction("line_locate_point")
@SqlType(DOUBLE)
public static Double lineLocatePoint(@SqlType(GEOMETRY_TYPE_NAME) Slice lineSlice, @SqlType(GEOMETRY_TYPE_NAME) Slice pointSlice)
{
    Geometry lineGeometry = deserialize(lineSlice);
    Geometry pointGeometry = deserialize(pointSlice);

    if (lineGeometry.isEmpty() || pointGeometry.isEmpty()) {
        // Empty inputs map to SQL NULL rather than an error.
        return null;
    }

    // Validate argument types in order: line first, then point.
    GeometryType lineType = GeometryType.getForJtsGeometryType(lineGeometry.getGeometryType());
    if (lineType != GeometryType.LINE_STRING && lineType != GeometryType.MULTI_LINE_STRING) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT,
                format("First argument to line_locate_point must be a LineString or a MultiLineString. Got: %s", lineGeometry.getGeometryType()));
    }
    GeometryType pointType = GeometryType.getForJtsGeometryType(pointGeometry.getGeometryType());
    if (pointType != GeometryType.POINT) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT,
                format("Second argument to line_locate_point must be a Point. Got: %s", pointGeometry.getGeometryType()));
    }

    double lengthFraction =
            new LengthIndexedLine(lineGeometry).indexOf(pointGeometry.getCoordinate()) / lineGeometry.getLength();
    return lengthFraction;
}
@Test
public void testLineLocatePoint()
{
    // Simple vertical segment: fraction is the y coordinate of the projection.
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1)'), ST_Point(0, 0.2))", DOUBLE, 0.2);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1)'), ST_Point(0, 0))", DOUBLE, 0.0);
    // Points beyond the ends clamp to 0 and 1 respectively.
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1)'), ST_Point(0, -1))", DOUBLE, 0.0);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1)'), ST_Point(0, 1))", DOUBLE, 1.0);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1)'), ST_Point(0, 2))", DOUBLE, 1.0);
    // Multi-segment line: fraction is measured against the total length.
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1, 2 1)'), ST_Point(0, 0.2))", DOUBLE, 0.06666666666666667);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1, 2 1)'), ST_Point(0.9, 1))", DOUBLE, 0.6333333333333333);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (1 3, 5 4)'), ST_Point(1, 3))", DOUBLE, 0.0);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (1 3, 5 4)'), ST_Point(2, 3))", DOUBLE, 0.23529411764705882);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (1 3, 5 4)'), ST_Point(5, 4))", DOUBLE, 1.0);
    // MultiLineString is also accepted.
    assertFunction("line_locate_point(ST_GeometryFromText('MULTILINESTRING ((0 0, 0 1), (2 2, 4 2))'), ST_Point(3, 1))", DOUBLE, 0.6666666666666666);
    // Empty geometries on either side produce NULL.
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING EMPTY'), ST_Point(0, 1))", DOUBLE, null);
    assertFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1, 2 1)'), ST_GeometryFromText('POINT EMPTY'))", DOUBLE, null);
    // Wrong geometry types raise INVALID_FUNCTION_ARGUMENT with descriptive messages.
    assertInvalidFunction("line_locate_point(ST_GeometryFromText('POLYGON ((1 1, 1 4, 4 4, 4 1, 1 1))'), ST_Point(0.4, 1))", "First argument to line_locate_point must be a LineString or a MultiLineString. Got: Polygon");
    assertInvalidFunction("line_locate_point(ST_GeometryFromText('LINESTRING (0 0, 0 1, 2 1)'), ST_GeometryFromText('POLYGON ((1 1, 1 4, 4 4, 4 1, 1 1))'))", "Second argument to line_locate_point must be a Point. Got: Polygon");
}
public static WorkflowInstanceAggregatedInfo computeAggregatedView( WorkflowInstance workflowInstance, boolean statusKnown) { if (workflowInstance == null) { // returning empty object since cannot access state of the current instance run return new WorkflowInstanceAggregatedInfo(); } WorkflowInstanceAggregatedInfo instanceAggregated = computeAggregatedViewNoStatus(workflowInstance); if (statusKnown || workflowInstance.getAggregatedInfo() == null) { instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus()); } else { computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated); } return instanceAggregated; }
@Test
public void testAggregatedViewSimple() {
  // Run 1: fresh run where every step succeeded.
  WorkflowInstance run1 =
      getGenericWorkflowInstance(
          1, WorkflowInstance.Status.SUCCEEDED, RunPolicy.START_FRESH_NEW_RUN, null);
  Workflow runtimeWorkflow = mock(Workflow.class);
  Map<String, StepRuntimeState> decodedOverview = new LinkedHashMap<>();
  decodedOverview.put("step1", generateStepState(StepInstance.Status.SUCCEEDED, 1L, 2L));
  decodedOverview.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 3L, 4L));
  decodedOverview.put("step3", generateStepState(StepInstance.Status.SUCCEEDED, 5L, 6L));
  WorkflowRuntimeOverview overview = mock(WorkflowRuntimeOverview.class);
  doReturn(decodedOverview).when(overview).decodeStepOverview(run1.getRuntimeDag());
  run1.setRuntimeOverview(overview);
  run1.setRuntimeWorkflow(runtimeWorkflow);

  // Aggregated view of run 1 should mirror the per-step states and overall status.
  WorkflowInstanceAggregatedInfo aggregated =
      AggregatedViewHelper.computeAggregatedView(run1, false);
  assertEquals(
      decodedOverview.get("step1").getStatus(),
      aggregated.getStepAggregatedViews().get("step1").getStatus());
  assertEquals(
      decodedOverview.get("step2").getStatus(),
      aggregated.getStepAggregatedViews().get("step2").getStatus());
  assertEquals(
      decodedOverview.get("step3").getStatus(),
      aggregated.getStepAggregatedViews().get("step3").getStatus());
  assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated.getWorkflowInstanceStatus());

  // Run 2: a restart-from-beginning of run 1 with new step timings.
  WorkflowInstance run2 =
      getGenericWorkflowInstance(
          2,
          WorkflowInstance.Status.SUCCEEDED,
          RunPolicy.RESTART_FROM_BEGINNING,
          RestartPolicy.RESTART_FROM_BEGINNING);
  Map<String, StepRuntimeState> decodedOverview2 = new LinkedHashMap<>();
  decodedOverview2.put("step1", generateStepState(StepInstance.Status.SUCCEEDED, 7L, 8L));
  decodedOverview2.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 9L, 10L));
  decodedOverview2.put("step3", generateStepState(StepInstance.Status.SUCCEEDED, 11L, 12L));
  doReturn(run1)
      .when(workflowInstanceDao)
      .getWorkflowInstanceRun(run2.getWorkflowId(), run2.getWorkflowInstanceId(), 1L);
  // Seed run 2 with the aggregated info carried over from run 1 (start times 1/3/5).
  run2.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(run1, true));
  assertEquals(3, run2.getAggregatedInfo().getStepAggregatedViews().size());
  assertEquals(
      1L, run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStartTime().longValue());
  assertEquals(
      3L, run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStartTime().longValue());
  assertEquals(
      5L, run2.getAggregatedInfo().getStepAggregatedViews().get("step3").getStartTime().longValue());
  WorkflowRuntimeOverview wro2 = mock(WorkflowRuntimeOverview.class);
  doReturn(decodedOverview2).when(wro2).decodeStepOverview(run2.getRuntimeDag());
  run2.setRuntimeOverview(wro2);
  run2.setRuntimeWorkflow(runtimeWorkflow);

  // Run 2's aggregated view should reflect run 2's own step runs and timings.
  WorkflowInstanceAggregatedInfo aggregated2 =
      AggregatedViewHelper.computeAggregatedView(run2, false);
  assertEquals(
      2L, aggregated2.getStepAggregatedViews().get("step1").getWorkflowRunId().longValue());
  assertEquals(
      2L, aggregated2.getStepAggregatedViews().get("step2").getWorkflowRunId().longValue());
  assertEquals(
      2L, aggregated2.getStepAggregatedViews().get("step3").getWorkflowRunId().longValue());
  assertEquals(7L, aggregated2.getStepAggregatedViews().get("step1").getStartTime().longValue());
  assertEquals(9L, aggregated2.getStepAggregatedViews().get("step2").getStartTime().longValue());
  assertEquals(11L, aggregated2.getStepAggregatedViews().get("step3").getStartTime().longValue());
  assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated2.getWorkflowInstanceStatus());
}
/**
 * Initializes the step: resolves the schema name and commit size (both may be
 * variable expressions), and derives the batch-mode / error-handling /
 * savepoint flags from the target database's capabilities.
 *
 * @return true when initialization succeeded, false otherwise
 */
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (SynchronizeAfterMergeMeta) smi;
  data = (SynchronizeAfterMergeData) sdi;
  if ( super.init( smi, sdi ) ) {
    meta.normalizeAllocationFields();
    data.realSchemaName = environmentSubstitute( meta.getSchemaName() );
    if ( meta.istablenameInField() ) {
      // Table name comes from a row field; the field name itself must be configured.
      if ( Utils.isEmpty( meta.gettablenameField() ) ) {
        logError( BaseMessages.getString( PKG, "SynchronizeAfterMerge.Log.Error.TableFieldnameEmpty" ) );
        return false;
      }
    }
    data.databaseMeta = meta.getDatabaseMeta();
    // if we are using Oracle then set releaseSavepoint to false
    if ( data.databaseMeta.getDatabaseInterface() instanceof OracleDatabaseMeta ) {
      data.releaseSavepoint = false;
    }
    // Commit size may be a variable like ${commit.size}; resolve before parsing.
    data.commitSize = Integer.parseInt( environmentSubstitute( meta.getCommitSize() ) );
    data.batchMode = data.commitSize > 0 && meta.useBatchUpdate();
    // Batch updates are not supported on PostgreSQL (and look-a-likes) together with error handling (PDI-366)
    data.specialErrorHandling = getStepMeta().isDoingErrorHandling() && meta.getDatabaseMeta().supportsErrorHandlingOnBatchUpdates();
    data.supportsSavepoints = meta.getDatabaseMeta().getDatabaseInterface().useSafePoints();
    if ( data.batchMode && data.specialErrorHandling ) {
      // Error handling wins over batch mode when both are requested.
      data.batchMode = false;
      if ( log.isBasic() ) {
        logBasic( BaseMessages.getString( PKG, "SynchronizeAfterMerge.Log.BatchModeDisabled" ) );
      }
    }
    data.db.setCommitSize( data.commitSize );
    return true;
  }
  return false;
}
@Test
public void initWithCommitSizeVariable() throws KettleDatabaseException {
  // Verifies that a commit size given as a variable (${commit.size}) is
  // substituted and parsed to an int during init().
  StepMeta stepMeta = mock( StepMeta.class );
  doReturn( STEP_NAME ).when( stepMeta ).getName();
  doReturn( 1 ).when( stepMeta ).getCopies();

  SynchronizeAfterMergeMeta smi = mock( SynchronizeAfterMergeMeta.class );
  SynchronizeAfterMergeData sdi = mock( SynchronizeAfterMergeData.class );

  DatabaseMeta dbMeta = mock( DatabaseMeta.class );
  doReturn( mock( MySQLDatabaseMeta.class ) ).when( dbMeta ).getDatabaseInterface();

  doReturn( dbMeta ).when( smi ).getDatabaseMeta();
  doReturn( "${commit.size}" ).when( smi ).getCommitSize();

  TransMeta transMeta = mock( TransMeta.class );
  doReturn( "1" ).when( transMeta ).getVariable( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER );
  doReturn( "2" ).when( transMeta ).getVariable( Const.INTERNAL_VARIABLE_CLUSTER_SIZE );
  doReturn( "Y" ).when( transMeta ).getVariable( Const.INTERNAL_VARIABLE_CLUSTER_MASTER );
  doReturn( stepMeta ).when( transMeta ).findStep( STEP_NAME );

  // Partial mock: only the real init()/setters run; variable substitution is stubbed.
  SynchronizeAfterMerge step = mock( SynchronizeAfterMerge.class );
  doCallRealMethod().when( step ).setTransMeta( any( TransMeta.class ) );
  doCallRealMethod().when( step ).setStepMeta( any( StepMeta.class ) );
  doCallRealMethod().when( step ).init( any( StepMetaInterface.class ), any( StepDataInterface.class ) );
  doCallRealMethod().when( step ).connectToDatabaseOrInitDataSource( any(), any() );
  doReturn( stepMeta ).when( step ).getStepMeta();
  doReturn( transMeta ).when( step ).getTransMeta();
  doReturn( "1" ).when( step ).getStepExecutionId();
  doReturn( "120" ).when( step ).environmentSubstitute( "${commit.size}" );

  step.setTransMeta( transMeta );
  step.setStepMeta( stepMeta );
  step.init( smi, sdi );

  // "${commit.size}" -> "120" -> parsed int.
  assertEquals( 120, sdi.commitSize );
}
public static boolean isUnclosedQuote(final String line) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity int quoteStart = -1; for (int i = 0; i < line.length(); ++i) { if (quoteStart < 0 && isQuoteChar(line, i)) { quoteStart = i; } else if (quoteStart >= 0 && isTwoQuoteStart(line, i) && !isEscaped(line, i)) { // Together, two quotes are effectively an escaped quote and don't act as a quote character. // Skip the next quote char, since it's coupled with the first. i++; } else if (quoteStart >= 0 && isQuoteChar(line, i) && !isEscaped(line, i)) { quoteStart = -1; } } final int commentInd = line.indexOf(COMMENT); if (commentInd < 0) { return quoteStart >= 0; } else if (quoteStart < 0) { return false; } else { return commentInd > quoteStart; } }
@Test
public void shouldNotFindUnclosedQuote_onlyComment() {
    // Given: a line with a comment but no quote characters at all.
    final String line = "some line -- this is a comment";

    // Then: nothing to close, so no unclosed quote is reported.
    assertThat(UnclosedQuoteChecker.isUnclosedQuote(line), is(false));
}
/**
 * Parses a Kerberos principal of the form {@code service/host@REALM} (host
 * and realm optional). A name containing '@' that does not match the
 * expected pattern is rejected as malformed.
 */
public static KerberosName parse(String principalName) {
    Matcher matcher = NAME_PARSER.matcher(principalName);
    if (matcher.matches()) {
        // Groups: 1 = service name, 3 = host (may be null), 4 = realm.
        return new KerberosName(matcher.group(1), matcher.group(3), matcher.group(4));
    }
    if (principalName.contains("@")) {
        // Has a realm separator but does not fit the principal pattern.
        throw new IllegalArgumentException("Malformed Kerberos name: " + principalName);
    }
    // Bare service name: neither host nor realm present.
    return new KerberosName(principalName, null, null);
}
@Test
public void testParse() throws IOException {
    List<String> rules = Arrays.asList(
        "RULE:[1:$1](App\\..*)s/App\\.(.*)/$1/g",
        "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g",
        "DEFAULT"
    );

    KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules);

    // Full service/host@realm principal.
    KerberosName name = KerberosName.parse("App.service-name/example.com@REALM.COM");
    assertEquals("App.service-name", name.serviceName());
    assertEquals("example.com", name.hostName());
    assertEquals("REALM.COM", name.realm());
    assertEquals("service-name", shortNamer.shortName(name));

    // Principal without a host component.
    name = KerberosName.parse("App.service-name@REALM.COM");
    assertEquals("App.service-name", name.serviceName());
    assertNull(name.hostName());
    assertEquals("REALM.COM", name.realm());
    assertEquals("service-name", shortNamer.shortName(name));

    // Principal that only matches the DEFAULT rule.
    name = KerberosName.parse("user/host@REALM.COM");
    assertEquals("user", name.serviceName());
    assertEquals("host", name.hostName());
    assertEquals("REALM.COM", name.realm());
    assertEquals("user", shortNamer.shortName(name));
}
/**
 * Fetches the values for the given keys. In transaction/pipeline mode the
 * reads are queued and {@code null} is returned (results arrive when the
 * batch is executed); otherwise the GETs run as one synchronous batch.
 */
@Override
public List<byte[]> mGet(byte[]... keys) {
    if (isQueueing() || isPipelined()) {
        // Queue the reads for later execution; the Spring Data contract is to return null here.
        for (byte[] key : keys) {
            read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
        }
        return null;
    }

    CommandBatchService batchService = new CommandBatchService(executorService);
    for (byte[] key : keys) {
        batchService.readAsync(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
    }
    BatchResult<byte[]> batchResult = (BatchResult<byte[]>) batchService.execute();
    return batchResult.getResponses();
}
@Test
public void testMGet() {
    // Seed ten key/value pairs via mSet, then read them back in one mGet.
    Map<byte[], byte[]> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
    }
    connection.mSet(map);
    // mGet must return the values in the same order as the requested keys.
    List<byte[]> r = connection.mGet(map.keySet().toArray(new byte[0][]));
    assertThat(r).containsExactly(map.values().toArray(new byte[0][]));
}
@Override
public void updateDiscountActivity(DiscountActivityUpdateReqVO updateReqVO) {
    // Validate that the activity exists
    DiscountActivityDO discountActivity = validateDiscountActivityExists(updateReqVO.getId());
    if (discountActivity.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) { // A closed activity cannot be modified
        throw exception(DISCOUNT_ACTIVITY_UPDATE_FAIL_STATUS_CLOSED);
    }
    // Validate that the products do not conflict with other activities
    validateDiscountActivityProductConflicts(updateReqVO.getId(), updateReqVO.getProducts());

    // Update the activity; status is recalculated from the new end time
    DiscountActivityDO updateObj = DiscountActivityConvert.INSTANCE.convert(updateReqVO)
            .setStatus(PromotionUtils.calculateActivityStatus(updateReqVO.getEndTime()));
    discountActivityMapper.updateById(updateObj);

    // Update the activity's products
    updateDiscountProduct(updateReqVO);
}
@Test
public void testUpdateDiscountActivity_success() {
    // Mock data (activity)
    DiscountActivityDO dbDiscountActivity = randomPojo(DiscountActivityDO.class);
    discountActivityMapper.insert(dbDiscountActivity); // @Sql: insert an existing record first
    // Mock data (products of that activity)
    DiscountProductDO dbDiscountProduct01 = randomPojo(DiscountProductDO.class, o -> o.setActivityId(dbDiscountActivity.getId())
            .setSpuId(1L).setSkuId(2L).setDiscountType(PromotionDiscountTypeEnum.PRICE.getType()).setDiscountPrice(3).setDiscountPercent(null));
    DiscountProductDO dbDiscountProduct02 = randomPojo(DiscountProductDO.class, o -> o.setActivityId(dbDiscountActivity.getId())
            .setSpuId(10L).setSkuId(20L).setDiscountType(PromotionDiscountTypeEnum.PERCENT.getType()).setDiscountPercent(30).setDiscountPrice(null));
    discountProductMapper.insert(dbDiscountProduct01);
    discountProductMapper.insert(dbDiscountProduct02);
    // Prepare request parameters
    DiscountActivityUpdateReqVO reqVO = randomPojo(DiscountActivityUpdateReqVO.class, o -> {
        o.setId(dbDiscountActivity.getId()); // set the ID being updated
        // Future time window, so the updated activity's computed status should be WAIT
        o.setStartTime(addTime(Duration.ofDays(1))).setEndTime(addTime(Duration.ofDays(2)));
        // Set the products
        o.setProducts(asList(new DiscountActivityBaseVO.Product().setSpuId(1L).setSkuId(2L)
                        .setDiscountType(PromotionDiscountTypeEnum.PRICE.getType()).setDiscountPrice(3).setDiscountPercent(null),
                new DiscountActivityBaseVO.Product().setSpuId(100L).setSkuId(200L)
                        .setDiscountType(PromotionDiscountTypeEnum.PERCENT.getType()).setDiscountPercent(30).setDiscountPrice(null)));
    });

    // Invoke
    discountActivityService.updateDiscountActivity(reqVO);
    // Verify the activity
    DiscountActivityDO discountActivity = discountActivityMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, discountActivity);
    assertEquals(discountActivity.getStatus(), PromotionActivityStatusEnum.WAIT.getStatus());
    // Verify the products were replaced by the ones in the request
    List<DiscountProductDO> discountProducts = discountProductMapper.selectList(DiscountProductDO::getActivityId, discountActivity.getId());
    assertEquals(discountProducts.size(), reqVO.getProducts().size());
    for (int i = 0; i < reqVO.getProducts().size(); i++) {
        DiscountActivityBaseVO.Product product = reqVO.getProducts().get(i);
        DiscountProductDO discountProduct = discountProducts.get(i);
        assertEquals(discountProduct.getActivityId(), discountActivity.getId());
        assertEquals(discountProduct.getSpuId(), product.getSpuId());
        assertEquals(discountProduct.getSkuId(), product.getSkuId());
        assertEquals(discountProduct.getDiscountType(), product.getDiscountType());
        assertEquals(discountProduct.getDiscountPrice(), product.getDiscountPrice());
        assertEquals(discountProduct.getDiscountPercent(), product.getDiscountPercent());
    }
}
/**
 * Creates a {@link JsonAsserter} for the given JSON document.
 *
 * @param json the JSON document to assert against
 * @return an asserter backed by the parsed document model
 */
public static JsonAsserter with(String json) {
    // Parse once up front; the asserter works on the in-memory document model.
    Object parsedDocument = JsonPath.parse(json).json();
    return new JsonAsserterImpl(parsedDocument);
}
@Test
public void failed_error_message() throws Exception {
    // A failing matcher must surface as an AssertionError from assertThat.
    assertThrows(AssertionError.class, () -> with(JSON).assertThat("$.store.book[0].category", endsWith("foobar")));
}
/**
 * Processes the sorted list of pop checkpoints in the revive object: skips
 * checkpoints whose topic or subscription group no longer exists, revives
 * the rest, and finally commits the advanced revive offset.
 *
 * Processing stops early when this broker is no longer responsible for
 * revive (slave), or when a checkpoint is too recent to be safely handled.
 */
protected void mergeAndRevive(ConsumeReviveObj consumeReviveObj) throws Throwable {
    ArrayList<PopCheckPoint> sortList = consumeReviveObj.genSortList();
    POP_LOGGER.info("reviveQueueId={}, ck listSize={}", queueId, sortList.size());
    if (sortList.size() != 0) {
        POP_LOGGER.info("reviveQueueId={}, 1st ck, startOffset={}, reviveOffset={}; last ck, startOffset={}, reviveOffset={}", queueId,
            sortList.get(0).getStartOffset(), sortList.get(0).getReviveOffset(),
            sortList.get(sortList.size() - 1).getStartOffset(), sortList.get(sortList.size() - 1).getReviveOffset());
    }
    long newOffset = consumeReviveObj.oldOffset;
    for (PopCheckPoint popCheckPoint : sortList) {
        if (!shouldRunPopRevive) {
            // No longer master for this queue: stop processing entirely.
            POP_LOGGER.info("slave skip ck process, revive topic={}, reviveQueueId={}", reviveTopic, queueId);
            break;
        }
        // Checkpoint too close to the scan end time: acks may still be in flight, stop here.
        if (consumeReviveObj.endTime - popCheckPoint.getReviveTime() <= (PopAckConstants.ackTimeInterval + PopAckConstants.SECOND)) {
            break;
        }

        // check normal topic, skip ck , if normal topic is not exist
        String normalTopic = KeyBuilder.parseNormalTopic(popCheckPoint.getTopic(), popCheckPoint.getCId());
        if (brokerController.getTopicConfigManager().selectTopicConfig(normalTopic) == null) {
            POP_LOGGER.warn("reviveQueueId={}, can not get normal topic {}, then continue", queueId, popCheckPoint.getTopic());
            newOffset = popCheckPoint.getReviveOffset();
            continue;
        }
        if (null == brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(popCheckPoint.getCId())) {
            POP_LOGGER.warn("reviveQueueId={}, can not get cid {}, then continue", queueId, popCheckPoint.getCId());
            newOffset = popCheckPoint.getReviveOffset();
            continue;
        }

        // Back-pressure: at most 3 in-flight revive requests. Requests stuck for
        // over 30s without completing are re-queued and dropped from the map.
        while (inflightReviveRequestMap.size() > 3) {
            waitForRunning(100);
            Pair<Long, Boolean> pair = inflightReviveRequestMap.firstEntry().getValue();
            if (!pair.getObject2() && System.currentTimeMillis() - pair.getObject1() > 1000 * 30) {
                PopCheckPoint oldCK = inflightReviveRequestMap.firstKey();
                rePutCK(oldCK, pair);
                inflightReviveRequestMap.remove(oldCK);
                POP_LOGGER.warn("stay too long, remove from reviveRequestMap, {}, {}, {}, {}", popCheckPoint.getTopic(),
                    popCheckPoint.getBrokerName(), popCheckPoint.getQueueId(), popCheckPoint.getStartOffset());
            }
        }

        reviveMsgFromCk(popCheckPoint);

        newOffset = popCheckPoint.getReviveOffset();
    }
    if (newOffset > consumeReviveObj.oldOffset) {
        if (!shouldRunPopRevive) {
            POP_LOGGER.info("slave skip commit, revive topic={}, reviveQueueId={}", reviveTopic, queueId);
            return;
        }
        // Persist the advanced revive offset only while still master.
        this.brokerController.getConsumerOffsetManager().commitOffset(PopAckConstants.LOCAL_HOST, PopAckConstants.REVIVE_GROUP, reviveTopic, queueId, newOffset);
    }
    reviveOffset = newOffset;
    consumeReviveObj.newOffset = newOffset;
}
@Test
public void testReviveMsgFromCk_messageNotFound_needRetry_end() throws Throwable {
    // With skip-on-max-reput enabled and rePutTimes already past the limit,
    // a missing message must be dropped: no retry write, no CK rewrite.
    brokerConfig.setSkipWhenCKRePutReachMaxTimes(true);
    PopCheckPoint ck = buildPopCheckPoint(0, 0, 0);
    ck.setRePutTimes("17");
    PopReviveService.ConsumeReviveObj reviveObj = new PopReviveService.ConsumeReviveObj();
    reviveObj.map.put("", ck);
    reviveObj.endTime = System.currentTimeMillis();

    // Message lookup returns "not found, but needs retry".
    when(escapeBridge.getMessageAsync(anyString(), anyLong(), anyInt(), anyString(), anyBoolean()))
        .thenReturn(CompletableFuture.completedFuture(Triple.of(null, "", true)));

    popReviveService.mergeAndRevive(reviveObj);

    verify(escapeBridge, times(0)).putMessageToSpecificQueue(any(MessageExtBrokerInner.class)); // write retry
    verify(messageStore, times(0)).putMessage(any(MessageExtBrokerInner.class)); // rewrite CK
}
/**
 * Handles {@code DROP ... DELETE TOPIC} statements: deletes the source's
 * Kafka topic and its key/value schema subjects, then rewrites the statement
 * without the DELETE TOPIC clause so downstream execution does not repeat
 * the deletion. All other statements pass through unchanged.
 */
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
  if (!(statement.getStatement() instanceof DropStatement)) {
    return statement;
  }

  final DropStatement dropStatement = (DropStatement) statement.getStatement();
  if (!dropStatement.isDeleteTopic()) {
    return statement;
  }

  final SourceName sourceName = dropStatement.getName();
  final DataSource source = metastore.getSource(sourceName);
  if (source != null) {
    if (source.isSource()) {
      // Read-only (CREATE SOURCE) objects must never have their topics deleted.
      throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
    }
    checkTopicRefs(source);

    deleteTopic(source);

    // Closer guarantees both subject deletions are attempted even if the
    // first one throws; exceptions are collected and rethrown from close().
    final Closer closer = Closer.create();
    closer.register(() -> deleteKeySubject(source));
    closer.register(() -> deleteValueSubject(source));
    try {
      closer.close();
    } catch (final KsqlException e) {
      throw e;
    } catch (final Exception e) {
      // Wrap anything unexpected so callers only see KsqlException.
      throw new KsqlException(e);
    }
  } else if (!dropStatement.getIfExists()) {
    throw new KsqlException("Could not find source to delete topic for: " + statement);
  }

  // Strip DELETE TOPIC from the statement text and AST before passing it on.
  final T withoutDelete = (T) dropStatement.withoutDeleteClause();
  final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";

  return statement.withStatement(withoutDeleteText, withoutDelete);
}
@Test
public void shouldDropTheDeleteTopicClause() {
  // When:
  final ConfiguredStatement<DropStream> injected = deleteInjector.inject(DROP_WITH_DELETE_TOPIC);

  // Then: the rewritten statement text and AST both lack the DELETE TOPIC clause.
  assertThat(injected.getMaskedStatementText(), is("DROP STREAM SOMETHING;"));
  assertThat("expected !isDeleteTopic", !injected.getStatement().isDeleteTopic());
}
/**
 * Last-resort handler for uncaught exceptions: logs the failing request and
 * responds with HTTP 500 and a JSON {@code {"error": ...}} body.
 */
void unhandledException(Exception ex, Request req, Response res) {
    final String query = req.queryString();
    String pathWithQuery = req.pathInfo();
    if (isNotBlank(query)) {
        pathWithQuery = pathWithQuery + "?" + query;
    }
    final String uri = req.requestMethod() + " " + pathWithQuery;
    LOG.error(format("Unhandled exception on [%s]: %s", uri, ex.getMessage()), ex);

    res.status(HttpURLConnection.HTTP_INTERNAL_ERROR);
    // Exceptions without a message still get a generic, non-null error body.
    final String errorMessage = ex.getMessage() == null ? "Internal server error" : ex.getMessage();
    res.body(GSON.toJson(Map.of("error", errorMessage)));
}
@Test
void shouldSendUnhandledExceptionResponseWithMessage() {
    Response response = mock(Response.class);
    new RoutesHelper(mock(SparkController.class)).unhandledException(RECORD_NOT_FOUND, mock(Request.class), response);
    // Exception message must be wrapped into a 500 JSON error body.
    verify(response).status(500);
    verify(response).body("{\"error\":\"Boom!!\"}");
}
/**
 * Maps a referenced field name to a formula type: string fields become
 * TextType, numeric fields (integer/bignumber/number) become a generic
 * number, and everything else — including unknown names and non-string
 * references — falls back to AnyType.
 */
public Type resolveReferenceType( Object name ) {
  if ( !( name instanceof String ) ) {
    return AnyType.TYPE;
  }
  ValueMetaInterface valueMeta = this.rowMeta.searchValueMeta( (String) name );
  if ( valueMeta == null ) {
    // Unknown field name: nothing to map, keep the type open.
    return AnyType.TYPE;
  }
  switch ( valueMeta.getType() ) {
    case ValueMetaInterface.TYPE_STRING:
      return TextType.TYPE;
    case ValueMetaInterface.TYPE_INTEGER:
    case ValueMetaInterface.TYPE_BIGNUMBER:
    case ValueMetaInterface.TYPE_NUMBER:
      return NumberType.GENERIC_NUMBER;
    default:
      return AnyType.TYPE;
  }
}
@Test
public void testResolveReferenceTypeWithMetaTypeNumber() {
  RowMetaInterface row = mock( RowMetaInterface.class );
  ValueMetaInterface valueMeta = mock( ValueMetaInterface.class );
  RowForumulaContext context = new RowForumulaContext( row );
  String name = "name";
  // A field of Kettle type NUMBER must resolve to a formula NumberType.
  when( row.searchValueMeta( name ) ).thenReturn( valueMeta );
  when( valueMeta.getType() ).thenReturn( ValueMetaInterface.TYPE_NUMBER );
  Type type = context.resolveReferenceType( name );
  assertTrue( type instanceof NumberType );
}
/**
 * Looks up the {@code PrimitiveBoxed} pair matching the given class, which
 * may be either the primitive or the boxed representation.
 *
 * @param c the class to look up
 * @return the first matching pair, or empty if the class is neither a known
 *     primitive nor a known boxed type
 */
public static Optional<PrimitiveBoxed> getKiePMMLPrimitiveBoxed(Class<?> c) {
    for (PrimitiveBoxed candidate : KIE_PMML_PRIMITIVE_BOXEDS) {
        if (c.equals(candidate.getPrimitive()) || c.equals(candidate.getBoxed())) {
            return Optional.of(candidate);
        }
    }
    return Optional.empty();
}
@Test
void isSameWithBoxing() {
    for (int i = 0; i < types; i++) {
        // A pair matches both its primitive and boxed representation, from either lookup direction...
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(primitives[i]).get().isSameWithBoxing(boxeds[i])).isTrue();
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(boxeds[i]).get().isSameWithBoxing(primitives[i])).isTrue();
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(primitives[i]).get().isSameWithBoxing(primitives[i])).isTrue();
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(boxeds[i]).get().isSameWithBoxing(boxeds[i])).isTrue();
        // ...but never an unrelated class.
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(primitives[i]).get().isSameWithBoxing(String.class)).isFalse();
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(boxeds[i]).get().isSameWithBoxing(String.class)).isFalse();
    }
}
/**
 * Sets the named metadata entry to a single value, delegating to the write
 * filter, which may transform or reject the write before it reaches
 * {@code metadata}.
 */
public void set(String name, String value) {
    writeFilter.set(name, value, metadata);
}
@Test
public void testSet() {
    String[] values = null;
    Metadata meta = new Metadata();

    // No value set yet.
    values = meta.getValues(CONTENTTYPE);
    assertEquals(0, values.length);

    meta.set(CONTENTTYPE, "value1");
    values = meta.getValues(CONTENTTYPE);
    assertEquals(1, values.length);
    assertEquals("value1", values[0]);

    // set() replaces any existing value rather than appending.
    meta.set(CONTENTTYPE, "value2");
    values = meta.getValues(CONTENTTYPE);
    assertEquals(1, values.length);
    assertEquals("value2", values[0]);

    // add() appends; order of values is preserved.
    meta.set(CONTENTTYPE, "new value 1");
    meta.add("contenttype", "new value 2");
    values = meta.getValues(CONTENTTYPE);
    assertEquals(2, values.length);
    assertEquals("new value 1", values[0]);
    assertEquals("new value 2", values[1]);
}
/**
 * Parses the given POM file without stripping any doctype declarations.
 *
 * @param file the POM file to parse
 * @return the parsed POM {@link Model}
 * @throws PomParseException if the file cannot be opened, read, or parsed
 */
public Model parseWithoutDocTypeCleanup(File file) throws PomParseException {
    try (FileInputStream fis = new FileInputStream(file)) {
        return parseWithoutDocTypeCleanup(fis);
    } catch (PomParseException ex) {
        // Already the exception type callers expect; re-throw unchanged
        // instead of the original instanceof-check-and-cast inside a broad
        // catch (IOException) clause.
        throw ex;
    } catch (IOException ex) {
        LOGGER.debug("", ex);
        throw new PomParseException(String.format("Unable to parse pom '%s'", file), ex);
    }
}
@Test(expected = PomParseException.class)
public void testParseWithoutDocTypeCleanup_WithDocType() throws Exception {
    // A POM containing a doctype must fail when doctype cleanup is skipped.
    File file = BaseTest.getResourceAsFile(this, "pom/mailapi-1.4.3_doctype.pom");
    PomParser instance = new PomParser();
    String expVersion = "1.4.3";
    Model result = instance.parseWithoutDocTypeCleanup(file);
    // Unreachable when the expected exception is thrown.
    assertEquals("Invalid version extracted", expVersion, result.getParentVersion());
}
/**
 * Lists the client metrics configuration resources on the cluster.
 *
 * The request carries no arguments and is sent to the least-loaded node;
 * the result future completes with the listings, or exceptionally on a
 * top-level error or transport failure.
 */
@Override
public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) {
    final long now = time.milliseconds();
    final KafkaFutureImpl<Collection<ClientMetricsResourceListing>> future = new KafkaFutureImpl<>();
    runnable.call(new Call("listClientMetricsResources", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        ListClientMetricsResourcesRequest.Builder createRequest(int timeoutMs) {
            return new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData());
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            ListClientMetricsResourcesResponse response = (ListClientMetricsResourcesResponse) abstractResponse;
            if (response.error().isFailure()) {
                // Top-level error from the broker: surface it through the future.
                future.completeExceptionally(response.error().exception());
            } else {
                future.complete(response.clientMetricsResources());
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    }, now);
    return new ListClientMetricsResourcesResult(future);
}
@Test
public void testListClientMetricsResources() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        List<ClientMetricsResourceListing> expected = asList(
            new ClientMetricsResourceListing("one"),
            new ClientMetricsResourceListing("two")
        );

        // Prepare a successful response carrying two resource names.
        ListClientMetricsResourcesResponseData responseData =
            new ListClientMetricsResourcesResponseData().setErrorCode(Errors.NONE.code());
        responseData.clientMetricsResources()
            .add(new ListClientMetricsResourcesResponseData.ClientMetricsResource().setName("one"));
        responseData.clientMetricsResources()
            .add((new ListClientMetricsResourcesResponseData.ClientMetricsResource()).setName("two"));
        env.kafkaClient().prepareResponse(
            request -> request instanceof ListClientMetricsResourcesRequest,
            new ListClientMetricsResourcesResponse(responseData));

        ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources();
        // Compare as sets: ordering of listings is not part of the contract.
        assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get()));
    }
}
/**
 * Decodes a MySQL binary JSON document into its textual JSON form.
 *
 * @param byteBuf buffer positioned at the start of the binary JSON payload
 * @return the decoded JSON as a {@link String}
 */
public static Serializable decode(final ByteBuf byteBuf) {
    // The first byte identifies the root value type; the value data starts right after it.
    final int rootValueType = byteBuf.readUnsignedByte() & 0xff;
    final StringBuilder decoded = new StringBuilder();
    decodeValue(rootValueType, 1, byteBuf, decoded);
    return decoded.toString();
}
@Test
void assertDecodeSmallJsonObjectWithDouble() {
    // A single-entry object whose value is the largest finite double.
    List<JsonEntry> jsonEntries = new LinkedList<>();
    jsonEntries.add(new JsonEntry(JsonValueTypes.DOUBLE, "key1", Double.MAX_VALUE));
    ByteBuf payload = mockJsonObjectByteBuf(jsonEntries, true);
    String actual = (String) MySQLJsonValueDecoder.decode(payload);
    assertThat(actual, is("{\"key1\":1.7976931348623157E308}"));
}
/**
 * Returns the discrete child resource of this one identified by the given value.
 *
 * @param child the child value; must not be a {@link Class} instance
 *     (a Class argument would denote a resource type, not a concrete value)
 */
@Override
public DiscreteResource child(Object child) {
    checkArgument(!(child instanceof Class<?>));

    return Resources.discrete(id.child(child)).resource();
}
@Test
public void testChild() {
    // Deriving a child from a parent must equal constructing the resource directly.
    DiscreteResource r1 = Resources.discrete(D1).resource().child(P1);
    DiscreteResource sameAsR2 = Resources.discrete(D1, P1).resource();

    assertThat(r1, is(sameAsR2));
}
/**
 * Returns whether the given path is a valid HDFS name, delegating the actual
 * validation rules to {@link DFSUtilClient#isValidName(String)}.
 */
public static boolean isValidName(String src) {
    return DFSUtilClient.isValidName(src);
}
@Test (timeout=15000)
public void testIsValidName() {
    // Absolute paths, including trailing slash, are accepted.
    String validPaths[] = new String[]{"/", "/bar/"};
    for (String path : validPaths) {
        assertTrue("Should have been accepted '" + path + "'",
            DFSUtil.isValidName(path));
    }

    // Relative components (".", ".."), empty components and ':' are rejected.
    String invalidPaths[] = new String[]{"/foo/../bar", "/foo/./bar",
        "/foo//bar", "/foo/:/bar", "/foo:bar"};
    for (String path : invalidPaths) {
        assertFalse("Should have been rejected '" + path + "'",
            DFSUtil.isValidName(path));
    }

    // Drive-letter style paths are only valid on Windows.
    String windowsPath = "/C:/foo/bar";
    if (Shell.WINDOWS) {
        assertTrue("Should have been accepted '" + windowsPath + "' in windows os.",
            DFSUtil.isValidName(windowsPath));
    } else {
        assertFalse("Should have been rejected '" + windowsPath + "' in unix os.",
            DFSUtil.isValidName(windowsPath));
    }
}
/**
 * When FIPS mode is enabled, validates that the server configuration is
 * FIPS-compliant across cipher suites/TLS versions, broker security, the
 * SSL endpoint algorithm, the schema registry URL, and all listeners.
 * No-op when FIPS mode is disabled. The validators throw on violations.
 */
@VisibleForTesting
static void validateFips(final KsqlConfig config, final KsqlRestConfig restConfig) {
    if (config.getBoolean(ConfluentConfigs.ENABLE_FIPS_CONFIG)) {
        final FipsValidator fipsValidator = ConfluentConfigs.buildFipsValidator();

        // validate cipher suites and TLS version
        validateCipherSuites(fipsValidator, restConfig);

        // validate broker
        validateBroker(fipsValidator, config);

        // validate ssl endpoint algorithm
        validateSslEndpointAlgo(fipsValidator, restConfig);

        // validate schema registry url
        validateSrUrl(fipsValidator, restConfig);

        // validate all listeners
        validateListeners(fipsValidator, restConfig);

        log.info("FIPS mode enabled for ksqlDB!");
    }
}
@Test
public void shouldFailOnNullSSLEndpointIdentificationAlgorithm() {
    // Given: FIPS enabled, SASL_SSL broker, but no SSL endpoint identification algorithm configured.
    final KsqlConfig config = configWith(ImmutableMap.of(
        ConfluentConfigs.ENABLE_FIPS_CONFIG, true,
        CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name
    ));
    final KsqlRestConfig restConfig = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .put(KsqlRestConfig.SSL_CIPHER_SUITES_CONFIG,
            Collections.singletonList("TLS_RSA_WITH_AES_256_CCM"))
        .build()
    );

    // When:
    final Exception e = assertThrows(
        SecurityException.class,
        () -> KsqlServerMain.validateFips(config, restConfig)
    );

    // Then: validation fails specifically because the algorithm is missing.
    assertThat(e.getMessage(), containsString(
        "The SSL endpoint identification algorithm ('"
            + KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG
            + "') is not specified."));
}
/**
 * Processes a ProcessBundle request: obtains (or creates) a cached {@code BundleProcessor},
 * runs the bundle's start functions, consumes input elements (either embedded in the request
 * or streamed over the data plane), runs finish functions, and assembles the response
 * (outbound elements, residual roots, monitoring data, finalization flag).
 * On success the processor is returned to the cache for reuse; on any failure it is discarded.
 */
public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request)
    throws Exception {
  BeamFnApi.ProcessBundleResponse.Builder response = BeamFnApi.ProcessBundleResponse.newBuilder();

  // Reuse a processor cached for this descriptor id, or build a fresh one.
  BundleProcessor bundleProcessor =
      bundleProcessorCache.get(
          request,
          () -> {
            try {
              return createBundleProcessor(
                  request.getProcessBundle().getProcessBundleDescriptorId(),
                  request.getProcessBundle());
            } catch (IOException e) {
              throw new RuntimeException(e);
            }
          });
  try {
    PTransformFunctionRegistry startFunctionRegistry = bundleProcessor.getStartFunctionRegistry();
    PTransformFunctionRegistry finishFunctionRegistry =
        bundleProcessor.getFinishFunctionRegistry();
    ExecutionStateTracker stateTracker = bundleProcessor.getStateTracker();

    try (HandleStateCallsForBundle beamFnStateClient = bundleProcessor.getBeamFnStateClient()) {
      stateTracker.start(request.getInstructionId());
      try {
        // Already in reverse topological order so we don't need to do anything.
        for (ThrowingRunnable startFunction : startFunctionRegistry.getFunctions()) {
          LOG.debug("Starting function {}", startFunction);
          startFunction.run();
        }

        if (request.getProcessBundle().hasElements()) {
          // Input elements are embedded directly in the request; every data/timer
          // stream must carry its terminator or the bundle is malformed.
          boolean inputFinished =
              bundleProcessor
                  .getInboundObserver()
                  .multiplexElements(request.getProcessBundle().getElements());
          if (!inputFinished) {
            throw new RuntimeException(
                "Elements embedded in ProcessBundleRequest do not contain stream terminators for "
                    + "all data and timer inputs. Unterminated endpoints: "
                    + bundleProcessor.getInboundObserver().getUnfinishedEndpoints());
          }
        } else if (!bundleProcessor.getInboundEndpointApiServiceDescriptors().isEmpty()) {
          // Otherwise stream elements over the data plane until completion.
          BeamFnDataInboundObserver observer = bundleProcessor.getInboundObserver();
          beamFnDataClient.registerReceiver(
              request.getInstructionId(),
              bundleProcessor.getInboundEndpointApiServiceDescriptors(),
              observer);
          observer.awaitCompletion();
          beamFnDataClient.unregisterReceiver(
              request.getInstructionId(),
              bundleProcessor.getInboundEndpointApiServiceDescriptors());
        }

        // Need to reverse this since we want to call finish in topological order.
        for (ThrowingRunnable finishFunction :
            Lists.reverse(finishFunctionRegistry.getFunctions())) {
          LOG.debug("Finishing function {}", finishFunction);
          finishFunction.run();
        }

        // If bundleProcessor has not flushed any elements, embed them in response.
        embedOutboundElementsIfApplicable(response, bundleProcessor);

        // Add all checkpointed residuals to the response.
        response.addAllResidualRoots(bundleProcessor.getSplitListener().getResidualRoots());

        // Add all metrics to the response.
        bundleProcessor.getProgressRequestLock().lock();
        Map<String, ByteString> monitoringData = finalMonitoringData(bundleProcessor);
        if (runnerAcceptsShortIds) {
          response.putAllMonitoringData(monitoringData);
        } else {
          // Runner needs full MonitoringInfo protos; expand the short ids.
          for (Map.Entry<String, ByteString> metric : monitoringData.entrySet()) {
            response.addMonitoringInfos(
                shortIds.get(metric.getKey()).toBuilder().setPayload(metric.getValue()));
          }
        }

        if (!bundleProcessor.getBundleFinalizationCallbackRegistrations().isEmpty()) {
          finalizeBundleHandler.registerCallbacks(
              bundleProcessor.getInstructionId(),
              ImmutableList.copyOf(bundleProcessor.getBundleFinalizationCallbackRegistrations()));
          response.setRequiresFinalization(true);
        }
      } finally {
        // We specifically deactivate state tracking while we are holding the progress request and
        // sampling locks.
        stateTracker.reset();
      }
    }

    // Mark the bundle processor as re-usable.
    bundleProcessorCache.release(
        request.getProcessBundle().getProcessBundleDescriptorId(), bundleProcessor);
    return BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response);
  } catch (Exception e) {
    // Make sure we clean-up from the active set of bundle processors.
    bundleProcessorCache.discard(bundleProcessor);
    throw e;
  }
}
// Verifies that an exception thrown while processing an incoming data element
// propagates out of processBundle, and that no extra interaction with the data
// client happens after the failure.
@Test
public void testDataProcessingExceptionsArePropagated() throws Exception {
  BeamFnApi.ProcessBundleDescriptor processBundleDescriptor =
      BeamFnApi.ProcessBundleDescriptor.newBuilder()
          .putTransforms(
              "2L",
              RunnerApi.PTransform.newBuilder()
                  .setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).build())
                  .build())
          .build();
  Map<String, BeamFnApi.ProcessBundleDescriptor> fnApiRegistry =
      ImmutableMap.of("1L", processBundleDescriptor);

  // Stub the data client so registering a receiver immediately delivers one
  // encoded element ("A") marked as the last of the stream.
  Mockito.doAnswer(
          (invocation) -> {
            ByteStringOutputStream encodedData = new ByteStringOutputStream();
            StringUtf8Coder.of().encode("A", encodedData);
            String instructionId = invocation.getArgument(0, String.class);
            CloseableFnDataReceiver<BeamFnApi.Elements> data =
                invocation.getArgument(2, CloseableFnDataReceiver.class);
            data.accept(
                BeamFnApi.Elements.newBuilder()
                    .addData(
                        BeamFnApi.Elements.Data.newBuilder()
                            .setInstructionId(instructionId)
                            .setTransformId("2L")
                            .setData(encodedData.toByteString())
                            .setIsLast(true))
                    .build());
            return null;
          })
      .when(beamFnDataClient)
      .registerReceiver(any(), any(), any());

  // The DATA_INPUT_URN runner throws as soon as the element arrives.
  ProcessBundleHandler handler =
      new ProcessBundleHandler(
          PipelineOptionsFactory.create(),
          Collections.emptySet(),
          fnApiRegistry::get,
          beamFnDataClient,
          null /* beamFnStateGrpcClientCache */,
          null /* finalizeBundleHandler */,
          new ShortIdMap(),
          executionStateSampler,
          ImmutableMap.of(
              DATA_INPUT_URN,
              (PTransformRunnerFactory<Object>)
                  (context) -> {
                    context.addIncomingDataEndpoint(
                        ApiServiceDescriptor.getDefaultInstance(),
                        StringUtf8Coder.of(),
                        (input) -> {
                          throw new IllegalStateException("TestException");
                        });
                    return null;
                  }),
          Caches.noop(),
          new BundleProcessorCache(),
          null /* dataSampler */);
  assertThrows(
      "TestException",
      IllegalStateException.class,
      () ->
          handler.processBundle(
              BeamFnApi.InstructionRequest.newBuilder()
                  .setInstructionId("instructionId")
                  .setProcessBundle(
                      BeamFnApi.ProcessBundleRequest.newBuilder()
                          .setProcessBundleDescriptorId("1L"))
                  .build()));
  // Ensure that we unregister during successful processing
  verify(beamFnDataClient).registerReceiver(eq("instructionId"), any(), any());
  verifyNoMoreInteractions(beamFnDataClient);
}
/**
 * Estimates the ratio between shuffle-compressed size and output-file size.
 * Columnar formats (ORC, Parquet) and row-based Avro use dedicated estimators;
 * any other format is assumed to shuffle at a 1:1 ratio.
 */
public static double shuffleCompressionRatio(
    SparkSession spark, FileFormat outputFileFormat, String outputCodec) {
  switch (outputFileFormat) {
    case ORC:
    case PARQUET:
      return columnarCompression(shuffleCodec(spark), outputCodec);
    case AVRO:
      return rowBasedCompression(shuffleCodec(spark), outputCodec);
    default:
      return 1.0;
  }
}
// Formats other than ORC/Parquet/Avro (here METADATA) must report a 1.0
// compression ratio regardless of the configured shuffle/output codecs.
@Test
public void testOtherFileFormats() {
  configureShuffle("lz4", true);
  double ratio = shuffleCompressionRatio(METADATA, "zstd");
  assertThat(ratio).isEqualTo(1.0);
}
/**
 * Prepares an HTTP GET request; delegates to {@code request(HttpMethod)}.
 *
 * @return a {@code ResponseReceiver} for further configuring and executing the request
 */
public final ResponseReceiver<?> get() {
  return request(HttpMethod.GET);
}
// Regression test for issue #632: a response carrying "Connection: upgrade, close"
// must result in the client connection actually being closed.
@Test
void testIssue632() throws Exception {
  disposableServer =
      createServer()
          .handle((req, res) ->
              res.header(HttpHeaderNames.CONNECTION,
                  HttpHeaderValues.UPGRADE + ", " + HttpHeaderValues.CLOSE))
          .bindNow();
  assertThat(disposableServer).isNotNull();

  // Latch released when the client channel's close future completes.
  CountDownLatch latch = new CountDownLatch(1);
  createHttpClientForContextWithPort()
      .doOnConnected(conn ->
          conn.channel()
              .closeFuture()
              .addListener(future -> latch.countDown()))
      .get()
      .uri("/")
      .responseContent()
      .blockLast(Duration.ofSeconds(30));

  assertThat(latch.await(30, TimeUnit.SECONDS)).isTrue();
}
@Override protected InputStream getRequestStream(Exchange exchange) throws SalesforceException { InputStream request; Message in = exchange.getIn(); request = in.getBody(InputStream.class); if (request == null) { AbstractDTOBase dto = in.getBody(AbstractDTOBase.class); if (dto != null) { // marshall the DTO request = getRequestStream(in, dto); } else { // if all else fails, get body as String final String body = in.getBody(String.class); if (null == body) { String msg = "Unsupported request message body " + (in.getBody() == null ? null : in.getBody().getClass()); throw new SalesforceException(msg, null); } else { request = new ByteArrayInputStream(body.getBytes(StandardCharsets.UTF_8)); } } } return request; }
// Serializes a DTO body through JsonRestProcessor.getRequestStream and checks
// the resulting JSON stays within the expected size bound.
@Test
public void getRequestStream() throws Exception {
  final SalesforceComponent comp = new SalesforceComponent();
  final SalesforceEndpointConfig conf = new SalesforceEndpointConfig();
  final OperationName op = OperationName.CREATE_BATCH;
  final SalesforceEndpoint endpoint = new SalesforceEndpoint("", comp, conf, op, "");

  final JsonRestProcessor jsonRestProcessor = new JsonRestProcessor(endpoint);

  final DefaultCamelContext context = new DefaultCamelContext();
  final Exchange exchange = new DefaultExchange(context, ExchangePattern.InOut);

  // Fixed timestamp so the serialized payload size is stable.
  final TestObject doc = new TestObject();
  doc.setCreationDate(ZonedDateTime.of(1717, 1, 2, 3, 4, 5, 6, ZoneId.systemDefault()));

  exchange.getIn().setBody(doc);

  try (InputStream stream = jsonRestProcessor.getRequestStream(exchange);
       InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8)) {
    final String result = IOUtils.toString(reader);
    assertThat(result.length()).isLessThanOrEqualTo(104);
  }
}
@Override public int publish(TopicPath topic, List<OutgoingMessage> outgoingMessages) throws IOException { List<PubsubMessage> pubsubMessages = new ArrayList<>(outgoingMessages.size()); for (OutgoingMessage outgoingMessage : outgoingMessages) { PubsubMessage pubsubMessage = new PubsubMessage().encodeData(outgoingMessage.getMessage().getData().toByteArray()); pubsubMessage.setAttributes(getMessageAttributes(outgoingMessage)); if (!outgoingMessage.getMessage().getOrderingKey().isEmpty()) { pubsubMessage.setOrderingKey(outgoingMessage.getMessage().getOrderingKey()); } // N.B. publishTime and messageId are intentionally not set on the message that is published pubsubMessages.add(pubsubMessage); } PublishRequest request = new PublishRequest().setMessages(pubsubMessages); PublishResponse response = pubsub.projects().topics().publish(topic.getPath(), request).execute(); return response.getMessageIds().size(); }
// Publishes a single message through a client configured WITHOUT timestamp/id
// attributes, and checks only the user attributes are sent to the service.
@Test
public void publishOneMessageWithNoTimestampOrIdAttribute() throws IOException {
  // For this test, create a new PubsubJsonClient without the timestamp attribute
  // or id attribute set.
  client = new PubsubJsonClient(null, null, mockPubsub);

  String expectedTopic = TOPIC.getPath();
  PubsubMessage expectedPubsubMessage =
      new PubsubMessage()
          .encodeData(DATA.getBytes(StandardCharsets.UTF_8))
          .setAttributes(ImmutableMap.<String, String>builder().put("k", "v").build());
  PublishRequest expectedRequest =
      new PublishRequest().setMessages(ImmutableList.of(expectedPubsubMessage));
  PublishResponse expectedResponse =
      new PublishResponse().setMessageIds(ImmutableList.of(MESSAGE_ID));
  when((Object) mockPubsub.projects().topics().publish(expectedTopic, expectedRequest).execute())
      .thenReturn(expectedResponse);

  Map<String, String> attrs = new HashMap<>();
  attrs.put("k", "v");
  OutgoingMessage actualMessage =
      OutgoingMessage.of(
          com.google.pubsub.v1.PubsubMessage.newBuilder()
              .setData(ByteString.copyFromUtf8(DATA))
              .putAllAttributes(attrs)
              .build(),
          MESSAGE_TIME,
          RECORD_ID,
          null);
  int n = client.publish(TOPIC, ImmutableList.of(actualMessage));
  assertEquals(1, n);
}
/**
 * Validates the captcha carried on a login request.
 * Skipped entirely when the captcha feature is disabled; on verification
 * failure a failed-login log entry is recorded and an exception is thrown.
 */
@VisibleForTesting
void validateCaptcha(AuthLoginReqVO reqVO) {
    // Skip validation entirely when the captcha feature is disabled
    if (!captchaEnable) {
        return;
    }
    // Bean-validate the captcha fields (CodeEnableGroup)
    ValidationUtils.validate(validator, reqVO, AuthLoginReqVO.CodeEnableGroup.class);
    CaptchaVO captchaVO = new CaptchaVO();
    captchaVO.setCaptchaVerification(reqVO.getCaptchaVerification());
    ResponseModel response = captchaService.verification(captchaVO);
    // Verification failed
    if (!response.isSuccess()) {
        // Record a failed-login log entry (incorrect captcha)
        createLoginLog(null, reqVO.getUsername(),
                LoginLogTypeEnum.LOGIN_USERNAME, LoginResultEnum.CAPTCHA_CODE_ERROR);
        throw exception(AUTH_LOGIN_CAPTCHA_CODE_ERROR, response.getRepMsg());
    }
}
// Expects a ConstraintViolationException when the captcha feature is enabled
// but the request carries no captcha verification payload.
@Test
public void testValidateCaptcha_constraintViolationException() {
    // Prepare a request without a captcha verification value
    AuthLoginReqVO reqVO = randomPojo(AuthLoginReqVO.class).setCaptchaVerification(null);
    // Mock the captcha feature as enabled
    ReflectUtil.setFieldValue(authService, "captchaEnable", true);
    // Call and assert the expected exception
    assertThrows(ConstraintViolationException.class,
            () -> authService.validateCaptcha(reqVO), "验证码不能为空");
}
/**
 * Creates a descriptor for file-reference content to be transferred.
 *
 * @param fileReference   the reference identifying the file
 * @param filename        the name of the file
 * @param type            how the content is packaged (e.g. compressed)
 * @param compressionType the compression algorithm used for the content
 */
public FileReferenceData(FileReference fileReference, String filename, Type type, CompressionType compressionType) {
    this.fileReference = fileReference;
    this.filename = filename;
    this.type = type;
    this.compressionType = compressionType;
}
// Reads a lazy file reference in chunks, checks chunked content and end-of-stream,
// and verifies the backing file survives close().
@Test
public void testFileReferenceData() throws IOException {
    String content = "blobbblubbblabb";
    File file = writeTempFile(content);
    FileReferenceData fileReferenceData =
            new LazyFileReferenceData(new FileReference("ref"), "foo", Type.compressed, file, CompressionType.gzip);
    // First chunk: buffer capacity (10 bytes) limits the read.
    ByteBuffer byteBuffer = ByteBuffer.allocate(10);
    assertEquals(10, fileReferenceData.nextContent(byteBuffer));
    assertEquals(content.substring(0, 10), Utf8.toString(Arrays.copyOfRange(byteBuffer.array(), 0, 10)));

    // Second chunk: the remaining 5 bytes.
    byteBuffer.flip();
    assertEquals(5, fileReferenceData.nextContent(byteBuffer));
    assertEquals(content.substring(10, 15), Utf8.toString(Arrays.copyOfRange(byteBuffer.array(), 0, 5)));

    // nextContent() will always return everything for FileReferenceDataBlob, so nothing more should be read
    assertEquals(-1, fileReferenceData.nextContent(byteBuffer));
    assertTrue(file.exists());
    fileReferenceData.close();
    assertTrue(file.exists()); // file should not be removed
}
/**
 * Loads a function by identifier, first from this catalog and, when not found
 * there, falling back to the wrapped Spark session catalog.
 *
 * @throws NoSuchFunctionException if neither catalog knows the identifier
 */
@Override
public UnboundFunction loadFunction(Identifier ident) throws NoSuchFunctionException {
  try {
    return super.loadFunction(ident);
  } catch (NoSuchFunctionException e) {
    // Not an Iceberg function; delegate to the session catalog.
    return getSessionCatalog().loadFunction(ident);
  }
}
// Verifies that permanent and temporary Hive UDFs resolve through the
// FunctionCatalog fallback to the session catalog.
@Test
public void testLoadFunction() {
  String functionClass = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper";

  // load permanent UDF in Hive via FunctionCatalog
  spark.sql(String.format("CREATE FUNCTION perm_upper AS '%s'", functionClass));
  assertThat(scalarSql("SELECT perm_upper('xyz')"))
      .as("Load permanent UDF in Hive")
      .isEqualTo("XYZ");

  // load temporary UDF in Hive via FunctionCatalog
  spark.sql(String.format("CREATE TEMPORARY FUNCTION temp_upper AS '%s'", functionClass));
  assertThat(scalarSql("SELECT temp_upper('xyz')"))
      .as("Load temporary UDF in Hive")
      .isEqualTo("XYZ");

  // TODO: fix loading Iceberg built-in functions in SessionCatalog
}
/**
 * Handles a single line of telnet input: blank input re-prompts; otherwise the
 * line is decoded into a {@code CommandContext} and executed. Command results,
 * unknown commands, permission failures and unexpected errors are all written
 * back to the client followed by a fresh prompt; a CLOSE result terminates the
 * connection after a goodbye message.
 */
@Override
protected void channelRead0(ChannelHandlerContext ctx, String msg) throws Exception {
    if (StringUtils.isBlank(msg)) {
        // Empty line: just re-print the prompt.
        ctx.writeAndFlush(QosProcessHandler.PROMPT);
    } else {
        CommandContext commandContext = TelnetCommandDecoder.decode(msg);
        commandContext.setQosConfiguration(qosConfiguration);
        commandContext.setRemote(ctx.channel());
        try {
            String result = commandExecutor.execute(commandContext);
            if (StringUtils.isEquals(QosConstants.CLOSE, result)) {
                // "quit"-style command: say goodbye and close the channel.
                ctx.writeAndFlush(getByeLabel()).addListener(ChannelFutureListener.CLOSE);
            } else {
                ctx.writeAndFlush(result + QosConstants.BR_STR + QosProcessHandler.PROMPT);
            }
        } catch (NoSuchCommandException ex) {
            ctx.writeAndFlush(msg + " :no such command");
            ctx.writeAndFlush(QosConstants.BR_STR + QosProcessHandler.PROMPT);
            log.error(QOS_COMMAND_NOT_FOUND, "", "", "can not found command " + commandContext, ex);
        } catch (PermissionDenyException ex) {
            ctx.writeAndFlush(msg + " :permission deny");
            ctx.writeAndFlush(QosConstants.BR_STR + QosProcessHandler.PROMPT);
            log.error(
                    QOS_PERMISSION_DENY_EXCEPTION,
                    "",
                    "",
                    "permission deny to access command " + commandContext,
                    ex);
        } catch (Exception ex) {
            // Catch-all so an executor failure never kills the telnet session.
            ctx.writeAndFlush(msg + " :fail to execute commandContext by " + ex.getMessage());
            ctx.writeAndFlush(QosConstants.BR_STR + QosProcessHandler.PROMPT);
            log.error(
                    QOS_UNEXPECTED_EXCEPTION,
                    "",
                    "",
                    "execute commandContext got exception " + commandContext,
                    ex);
        }
    }
}
// A blank input line must cause the handler to write the prompt and nothing else.
@Test
void testPrompt() throws Exception {
    ChannelHandlerContext context = mock(ChannelHandlerContext.class);
    TelnetProcessHandler handler = new TelnetProcessHandler(
            FrameworkModel.defaultModel(),
            QosConfiguration.builder()
                    .anonymousAccessPermissionLevel(PermissionLevel.NONE.name())
                    .build());
    handler.channelRead0(context, "");
    verify(context).writeAndFlush(QosProcessHandler.PROMPT);
}
/**
 * Returns a unique variant of {@code oldName} that does not collide with any
 * name in {@code checklist}; delegates to the list-based overload.
 */
public static String getUniqueName(String oldName, List<String> checklist) {
    List<String> renamed = getUniqueName(Collections.singletonList(oldName), checklist);
    return renamed.get(0);
}
// Non-colliding names pass through unchanged; colliding names (including
// duplicates within the input) get numeric suffixes.
@Test
void testGetUniqueName() {
    assertThat(
                    RowTypeUtils.getUniqueName(
                            Arrays.asList("Dave", "Evan"), Arrays.asList("Alice", "Bob")))
            .isEqualTo(Arrays.asList("Dave", "Evan"));
    assertThat(
                    RowTypeUtils.getUniqueName(
                            Arrays.asList("Bob", "Bob", "Dave", "Alice"),
                            Arrays.asList("Alice", "Bob")))
            .isEqualTo(Arrays.asList("Bob_0", "Bob_1", "Dave", "Alice_0"));
}
/**
 * Returns the sink configuration, converted from the function details held in
 * this context's config.
 */
@Override
public SinkConfig getSinkConfig() {
    return SinkConfigUtils.convertFromDetails(config.getFunctionDetails());
}
// The context must expose a non-null SinkConfig.
@Test
public void testGetSinkConfig() {
    SinkContext sinkContext = context;
    SinkConfig sinkConfig = sinkContext.getSinkConfig();
    Assert.assertNotNull(sinkConfig);
}
/**
 * PUTs a connector configuration to the Connect REST API at
 * {@code /connectors/<name>/config}, creating or updating the connector.
 * Completes with the parsed JSON response on 200/201; fails with a
 * {@code ConnectRestException} carrying the extracted error message otherwise.
 */
@Override
@SuppressWarnings("unchecked")
public Future<Map<String, Object>> createOrUpdatePutRequest(
        Reconciliation reconciliation, String host, int port,
        String connectorName, JsonObject configJson) {
    Buffer data = configJson.toBuffer();
    String path = "/connectors/" + connectorName + "/config";
    LOGGER.debugCr(reconciliation, "Making PUT request to {} with body {}", path, configJson);
    return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true),
            (httpClient, result) -> httpClient.request(HttpMethod.PUT, port, host, path, request -> {
                if (request.succeeded()) {
                    request.result().setFollowRedirects(true)
                            .putHeader("Accept", "application/json")
                            .putHeader("Content-Type", "application/json")
                            .putHeader("Content-Length", String.valueOf(data.length()))
                            .write(data);
                    request.result().send(response -> {
                        if (response.succeeded()) {
                            if (response.result().statusCode() == 200
                                    || response.result().statusCode() == 201) {
                                // Success: deserialize the JSON body into a Map.
                                response.result().bodyHandler(buffer -> {
                                    try {
                                        @SuppressWarnings({ "rawtypes" })
                                        Map t = mapper.readValue(buffer.getBytes(), Map.class);
                                        LOGGER.debugCr(reconciliation,
                                                "Got {} response to PUT request to {}: {}",
                                                response.result().statusCode(), path, t);
                                        result.complete(t);
                                    } catch (IOException e) {
                                        result.fail(new ConnectRestException(
                                                response.result(),
                                                "Could not deserialize response: " + e));
                                    }
                                });
                            } else {
                                // TODO Handle 409 (Conflict) indicating a rebalance in progress
                                LOGGER.debugCr(reconciliation,
                                        "Got {} response to PUT request to {}",
                                        response.result().statusCode(), path);
                                response.result().bodyHandler(buffer -> {
                                    result.fail(new ConnectRestException(
                                            response.result(),
                                            tryToExtractErrorMessage(reconciliation, buffer)));
                                });
                            }
                        } else {
                            result.tryFail(response.cause());
                        }
                    });
                } else {
                    result.fail(request.cause());
                }
            }));
}
// A 500 response whose body is not valid Connect error JSON must fail the
// request with the fallback "Unknown error message" text.
@Test
public void testFeatureCompletionWithBadlyFormattedError(Vertx vertx, VertxTestContext context)
        throws ExecutionException, InterruptedException {
    HttpServer server = mockApi(vertx, 500, "Some error message");
    KafkaConnectApi api = new KafkaConnectApiImpl(vertx);
    Checkpoint async = context.checkpoint();
    api.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "127.0.0.1",
                    server.actualPort(), "my-connector", new JsonObject())
            .onComplete(context.failing(res -> context.verify(() -> {
                assertThat(res.getMessage(), containsString("Unknown error message"));
                server.close();
                async.flag();
            })));
}
/**
 * Stops the named YARN service application by PUTting a STOPPED state to the
 * service API endpoint.
 *
 * @return EXIT_SUCCESS on success (or the processed response code),
 *         EXIT_EXCEPTION_THROWN if the request fails for any reason
 */
@Override
public int actionStop(String appName) throws IOException, YarnException {
  try {
    Service service = new Service();
    service.setName(appName);
    service.setState(ServiceState.STOPPED);
    String buffer = jsonSerDeser.toJson(service);
    ClientResponse response =
        getApiClient(getServicePath(appName)).put(ClientResponse.class, buffer);
    return processResponse(response);
  } catch (Exception e) {
    // Report the failure via log + exit code rather than propagating.
    LOG.error("Fail to stop application: ", e);
    return EXIT_EXCEPTION_THROWN;
  }
}
// Stopping an existing app through the API service client must succeed.
@Test
void testStop() {
  String appName = "example-app";
  try {
    int result = asc.actionStop(appName);
    assertEquals(EXIT_SUCCESS, result);
  } catch (IOException | YarnException e) {
    fail();
  }
}
/** Exposes the ordered filter registry for tests. */
@VisibleForTesting
protected List<MessageFilter> getFilterRegistry() {
    return filterRegistry;
}
// Filters handed over in arbitrary set order must end up sorted by priority
// in the processor's registry.
@Test
public void testFiltersAreOrdered() {
    final DummyFilter third = new DummyFilter(30);
    final DummyFilter first = new DummyFilter(10);
    final DummyFilter second = new DummyFilter(20);
    // Deliberately out-of-order input set.
    final Set<MessageFilter> filters = ImmutableSet.of(third, first, second);
    final MessageFilterChainProcessor processor =
            new MessageFilterChainProcessor(new MetricRegistry(), filters, acknowledger, serverStatus);

    final List<MessageFilter> filterRegistry = processor.getFilterRegistry();

    Assert.assertEquals(filterRegistry.get(0), first);
    Assert.assertEquals(filterRegistry.get(1), second);
    Assert.assertEquals(filterRegistry.get(2), third);
}
/**
 * Creates a Hessian2 object output over {@code out}. The factory manager is
 * looked up from the URL's framework model when one can be resolved, falling
 * back to the default shared framework model.
 */
@Override
public ObjectOutput serialize(URL url, OutputStream out) throws IOException {
    FrameworkModel frameworkModel = (url == null) ? null : url.getOrDefaultFrameworkModel();
    if (frameworkModel == null) {
        // Same fallback as Optional.orElseGet: covers both a null URL and a
        // URL that resolves to no framework model.
        frameworkModel = FrameworkModel.defaultModel();
    }
    Hessian2FactoryManager factoryManager =
            frameworkModel.getBeanFactory().getBean(Hessian2FactoryManager.class);
    return new Hessian2ObjectOutput(out, factoryManager);
}
// Writing a type that is trusted but not Serializable must fail with IOException.
@Test
void testLimit3() throws IOException, ClassNotFoundException {
    FrameworkModel frameworkModel = new FrameworkModel();
    Serialization serialization =
            frameworkModel.getExtensionLoader(Serialization.class).getExtension("hessian2");
    URL url = URL.valueOf("").setScopeModel(frameworkModel);

    // write un-serializable failed
    TrustedNotSerializable trustedPojo =
            new TrustedNotSerializable(ThreadLocalRandom.current().nextDouble());
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    ObjectOutput objectOutput = serialization.serialize(url, outputStream);
    Assertions.assertThrows(IOException.class, () -> objectOutput.writeObject(trustedPojo));

    frameworkModel.destroy();
}
/**
 * Two-argument ST_Union: delegates to the list-based union of the pair.
 */
@Description("Returns a geometry that represents the point set union of the input geometries.")
@ScalarFunction("ST_Union")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stUnion(@SqlType(GEOMETRY_TYPE_NAME) Slice left, @SqlType(GEOMETRY_TYPE_NAME) Slice right) {
    return stUnion(ImmutableList.of(left, right));
}
/**
 * Exercises ST_Union across empty, self, touching, within and overlapping
 * combinations of all geometry kinds, plus a regression case for the Esri
 * union hang (https://github.com/Esri/geometry-api-java/issues/266).
 */
@Test
public void testSTUnion()
{
    List<String> emptyWkts = ImmutableList.of(
            "POINT EMPTY",
            "MULTIPOINT EMPTY",
            "LINESTRING EMPTY",
            "MULTILINESTRING EMPTY",
            "POLYGON EMPTY",
            "MULTIPOLYGON EMPTY",
            "GEOMETRYCOLLECTION EMPTY");
    List<String> simpleWkts = ImmutableList.of(
            "POINT (1 2)",
            "MULTIPOINT ((1 2), (3 4))",
            "LINESTRING (0 0, 2 2, 4 4)",
            "MULTILINESTRING ((0 0, 2 2, 4 4), (5 5, 7 7, 9 9))",
            "POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))",
            "MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((2 4, 2 6, 6 6, 6 4, 2 4)))",
            "GEOMETRYCOLLECTION (LINESTRING (0 5, 5 5), POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1)))");

    // empty geometry
    for (String emptyWkt : emptyWkts) {
        for (String simpleWkt : simpleWkts) {
            assertUnion(emptyWkt, simpleWkt, simpleWkt);
        }
    }

    // self union
    for (String simpleWkt : simpleWkts) {
        assertUnion(simpleWkt, simpleWkt, simpleWkt);
    }

    // touching union
    assertUnion("POINT (1 2)", "MULTIPOINT ((1 2), (3 4))", "MULTIPOINT ((1 2), (3 4))");
    assertUnion("MULTIPOINT ((1 2))", "MULTIPOINT ((1 2), (3 4))", "MULTIPOINT ((1 2), (3 4))");
    assertUnion("LINESTRING (0 1, 1 2)", "LINESTRING (1 2, 3 4)", "LINESTRING (0 1, 1 2, 3 4)");
    assertUnion("MULTILINESTRING ((0 0, 2 2, 4 4), (5 5, 7 7, 9 9))", "MULTILINESTRING ((5 5, 7 7, 9 9), (11 11, 13 13, 15 15))", "MULTILINESTRING ((0 0, 2 2, 4 4), (5 5, 7 7, 9 9), (11 11, 13 13, 15 15))");
    assertUnion("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", "POLYGON ((1 0, 2 0, 2 1, 1 1, 1 0))", "POLYGON ((0 0, 0 1, 1 1, 2 1, 2 0, 1 0, 0 0))");
    assertUnion("MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)))", "MULTIPOLYGON (((1 0, 2 0, 2 1, 1 1, 1 0)))", "POLYGON ((0 0, 0 1, 1 1, 2 1, 2 0, 1 0, 0 0))");
    assertUnion("GEOMETRYCOLLECTION (POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0)), POINT (1 2))", "GEOMETRYCOLLECTION (POLYGON ((1 0, 2 0, 2 1, 1 1, 1 0)), MULTIPOINT ((1 2), (3 4)))", "GEOMETRYCOLLECTION (MULTIPOINT ((1 2), (3 4)), POLYGON ((0 0, 0 1, 1 1, 2 1, 2 0, 1 0, 0 0)))");

    // within union
    assertUnion("MULTIPOINT ((20 20), (25 25))", "POINT (25 25)", "MULTIPOINT ((20 20), (25 25))");
    assertUnion("LINESTRING (20 20, 30 30)", "POINT (25 25)", "LINESTRING (20 20, 25 25, 30 30)");
    assertUnion("LINESTRING (20 20, 30 30)", "LINESTRING (25 25, 27 27)", "LINESTRING (20 20, 25 25, 27 27, 30 30)");
    assertUnion("POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))", "POLYGON ((1 1, 1 2, 2 2, 2 1, 1 1))", "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))");
    assertUnion("MULTIPOLYGON (((0 0, 0 2, 2 2, 2 0, 0 0)), ((2 2, 2 4, 4 4, 4 2, 2 2)))", "POLYGON ((2 2, 2 3, 3 3, 3 2, 2 2))", "MULTIPOLYGON (((2 2, 2 3, 2 4, 4 4, 4 2, 3 2, 2 2)), ((0 0, 0 2, 2 2, 2 0, 0 0)))");
    assertUnion("GEOMETRYCOLLECTION (POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0)), MULTIPOINT ((20 20), (25 25)))", "GEOMETRYCOLLECTION (POLYGON ((1 1, 1 2, 2 2, 2 1, 1 1)), POINT (25 25))", "GEOMETRYCOLLECTION (MULTIPOINT ((20 20), (25 25)), POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0)))");

    // overlap union
    assertUnion("LINESTRING (1 1, 3 1)", "LINESTRING (2 1, 4 1)", "LINESTRING (1 1, 2 1, 3 1, 4 1)");
    assertUnion("MULTILINESTRING ((1 1, 3 1))", "MULTILINESTRING ((2 1, 4 1))", "LINESTRING (1 1, 2 1, 3 1, 4 1)");
    assertUnion("POLYGON ((1 1, 3 1, 3 3, 1 3, 1 1))", "POLYGON ((2 2, 4 2, 4 4, 2 4, 2 2))", "POLYGON ((1 1, 1 3, 2 3, 2 4, 4 4, 4 2, 3 2, 3 1, 1 1))");
    assertUnion("MULTIPOLYGON (((1 1, 3 1, 3 3, 1 3, 1 1)))", "MULTIPOLYGON (((2 2, 4 2, 4 4, 2 4, 2 2)))", "POLYGON ((1 1, 1 3, 2 3, 2 4, 4 4, 4 2, 3 2, 3 1, 1 1))");
    assertUnion("GEOMETRYCOLLECTION (POLYGON ((1 1, 3 1, 3 3, 1 3, 1 1)), LINESTRING (1 1, 3 1))", "GEOMETRYCOLLECTION (POLYGON ((2 2, 4 2, 4 4, 2 4, 2 2)), LINESTRING (2 1, 4 1))", "GEOMETRYCOLLECTION (LINESTRING (3 1, 4 1), POLYGON ((1 1, 1 3, 2 3, 2 4, 4 4, 4 2, 3 2, 3 1, 2 1, 1 1)))");

    // Union hanging bug: https://github.com/Esri/geometry-api-java/issues/266
    assertUnion(
            "POINT (-44.16176186699087 -19.943264803833348)",
            "LINESTRING (-44.1247493 -19.9467657, -44.1247979 -19.9468385, -44.1249043 -19.946934, -44.1251096 -19.9470651, -44.1252609 -19.9471383, -44.1254992 -19.947204, -44.1257652 -19.947229, -44.1261292 -19.9471833, -44.1268946 -19.9470098, -44.1276847 -19.9468416, -44.127831 -19.9468143, -44.1282639 -19.9467366, -44.1284569 -19.9467237, -44.1287119 -19.9467261, -44.1289437 -19.9467665, -44.1291499 -19.9468221, -44.1293856 -19.9469396, -44.1298857 -19.9471497, -44.1300908 -19.9472071, -44.1302743 -19.9472331, -44.1305029 -19.9472364, -44.1306498 -19.9472275, -44.1308054 -19.947216, -44.1308553 -19.9472037, -44.1313206 -19.9471394, -44.1317889 -19.9470854, -44.1330422 -19.9468887, -44.1337465 -19.9467083, -44.1339922 -19.9466842, -44.1341506 -19.9466997, -44.1343621 -19.9467226, -44.1345134 -19.9467855, -44.1346494 -19.9468456, -44.1347295 -19.946881, -44.1347988 -19.9469299, -44.1350231 -19.9471131, -44.1355843 -19.9478307, -44.1357802 -19.9480557, -44.1366289 -19.949198, -44.1370384 -19.9497001, -44.137386 -19.9501921, -44.1374113 -19.9502263, -44.1380888 -19.9510925, -44.1381769 -19.9513526, -44.1382509 -19.9516202, -44.1383014 -19.9522136, -44.1383889 -19.9530931, -44.1384227 -19.9538784, -44.1384512 -19.9539653, -44.1384555 -19.9539807, -44.1384901 -19.9541928, -44.1385563 -19.9543859, -44.1386656 -19.9545781, -44.1387339 -19.9546889, -44.1389219 -19.9548661, -44.1391695 -19.9550384, -44.1393672 -19.9551414, -44.1397538 -19.9552208, -44.1401714 -19.9552332, -44.1405656 -19.9551143, -44.1406198 -19.9550853, -44.1407579 -19.9550224, -44.1409029 -19.9549201, -44.1410283 -19.9548257, -44.1413902 -19.9544132, -44.141835 -19.9539274, -44.142268 -19.953484, -44.1427036 -19.9531023, -44.1436229 -19.952259, -44.1437568 -19.9521565, -44.1441783 -19.9517273, -44.144644 -19.9512109, -44.1452538 -19.9505663, -44.1453541 -19.9504774, -44.1458653 -19.9500442, -44.1463563 -19.9496473, -44.1467534 -19.9492812, -44.1470553 -19.9490028, -44.1475804 -19.9485293, -44.1479838 -19.9482096, -44.1485003 -19.9478532, -44.1489451 -19.9477314, -44.1492225 -19.9477024, -44.149453 -19.9476684, -44.149694 -19.9476387, -44.1499556 -19.9475436, -44.1501398 -19.9474234, -44.1502723 -19.9473206, -44.150421 -19.9471473, -44.1505043 -19.9470004, -44.1507664 -19.9462594, -44.150867 -19.9459518, -44.1509225 -19.9457843, -44.1511168 -19.945466, -44.1513601 -19.9452272, -44.1516846 -19.944999, -44.15197 -19.9448738, -44.1525994 -19.9447263, -44.1536614 -19.9444791, -44.1544071 -19.9442671, -44.1548978 -19.9441275, -44.1556247 -19.9438304, -44.1565996 -19.9434083, -44.1570351 -19.9432556, -44.1573142 -19.9432091, -44.1575332 -19.9431645, -44.157931 -19.9431484, -44.1586408 -19.9431504, -44.1593575 -19.9431457, -44.1596498 -19.9431562, -44.1600991 -19.9431475, -44.1602331 -19.9431567, -44.1607926 -19.9432449, -44.1609723 -19.9432499, -44.1623815 -19.9432765, -44.1628299 -19.9433645, -44.1632475 -19.9435839, -44.1633456 -19.9436559, -44.1636261 -19.9439375, -44.1638186 -19.9442439, -44.1642535 -19.9451781, -44.165178 -19.947156, -44.1652928 -19.9474016, -44.1653074 -19.9474329, -44.1654026 -19.947766, -44.1654774 -19.9481718, -44.1655699 -19.9490241, -44.1656196 -19.9491538, -44.1659735 -19.9499097, -44.1662485 -19.9504925, -44.1662996 -19.9506347, -44.1663574 -19.9512961, -44.1664094 -19.9519273, -44.1664144 -19.9519881, -44.1664799 -19.9526399, -44.1666965 -19.9532586, -44.1671191 -19.9544126, -44.1672019 -19.9545869, -44.1673344 -19.9547603, -44.1675958 -19.9550466, -44.1692349 -19.9567775, -44.1694607 -19.9569284, -44.1718843 -19.9574147, -44.1719167 -19.9574206, -44.1721627 -19.9574748, -44.1723207 -19.9575386, -44.1724439 -19.9575883, -44.1742798 -19.9583293, -44.1748841 -19.9585688, -44.1751118 -19.9586796, -44.1752554 -19.9587769, -44.1752644 -19.9587881, -44.1756052 -19.9592143, -44.1766415 -19.9602689, -44.1774912 -19.9612387, -44.177663 -19.961364, -44.177856 -19.9614494, -44.178034 -19.9615125, -44.1782475 -19.9615423, -44.1785115 -19.9615155, -44.1795404 -19.9610879, -44.1796393 -19.9610759, -44.1798873 -19.9610459, -44.1802404 -19.961036, -44.1804714 -19.9609634, -44.181059 -19.9605365, -44.1815113 -19.9602333, -44.1826712 -19.9594067, -44.1829715 -19.9592551, -44.1837201 -19.9590611, -44.1839277 -19.9590073, -44.1853022 -19.9586512, -44.1856812 -19.9585316, -44.1862915 -19.9584212, -44.1866215 -19.9583494, -44.1867651 -19.9583391, -44.1868852 -19.9583372, -44.1872523 -19.9583313, -44.187823 -19.9583281, -44.1884457 -19.958351, -44.1889559 -19.958437, -44.1893825 -19.9585816, -44.1897582 -19.9587828, -44.1901186 -19.9590453, -44.1912457 -19.9602029, -44.1916575 -19.9606307, -44.1921624 -19.9611588, -44.1925367 -19.9615872, -44.1931832 -19.9622566, -44.1938468 -19.9629343, -44.194089 -19.9631996, -44.1943924 -19.9634141, -44.1946006 -19.9635104, -44.1948789 -19.963599, -44.1957402 -19.9637569, -44.1964094 -19.9638505, -44.1965875 -19.9639188, -44.1967865 -19.9640801, -44.197096 -19.9643572, -44.1972765 -19.964458, -44.1974407 -19.9644824, -44.1976234 -19.9644668, -44.1977654 -19.9644282, -44.1980715 -19.96417, -44.1984541 -19.9638069, -44.1986632 -19.9636002, -44.1988132 -19.9634172, -44.1989542 -19.9632962, -44.1991349 -19.9631081)",
            "LINESTRING (-44.1247493 -19.9467657, -44.1247979 -19.9468385, -44.1249043 -19.946934, -44.1251096 -19.9470651, -44.1252609 -19.9471383, -44.1254992 -19.947204, -44.1257652 -19.947229, -44.1261292 -19.9471833, -44.1268946 -19.9470098, -44.1276847 -19.9468416, -44.127831 -19.9468143, -44.1282639 -19.9467366, -44.1284569 -19.9467237, -44.1287119 -19.9467261, -44.1289437 -19.9467665, -44.1291499 -19.9468221, -44.1293856 -19.9469396, -44.1298857 -19.9471497, -44.1300908 -19.9472071, -44.1302743 -19.9472331, -44.1305029 -19.9472364, -44.1306498 -19.9472275, -44.1308054 -19.947216, -44.1308553 -19.9472037, -44.1313206 -19.9471394, -44.1317889 -19.9470854, -44.1330422 -19.9468887, -44.1337465 -19.9467083, -44.1339922 -19.9466842, -44.1341506 -19.9466997, -44.1343621 -19.9467226, -44.1345134 -19.9467855, -44.1346494 -19.9468456, -44.1347295 -19.946881, -44.1347988 -19.9469299, -44.1350231 -19.9471131, -44.1355843 -19.9478307, -44.1357802 -19.9480557, -44.1366289 -19.949198, -44.1370384 -19.9497001, -44.137386 -19.9501921, -44.1374113 -19.9502263, -44.1380888 -19.9510925, -44.1381769 -19.9513526, -44.1382509 -19.9516202, -44.1383014 -19.9522136, -44.1383889 -19.9530931, -44.1384227 -19.9538784, -44.1384512 -19.9539653, -44.1384555 -19.9539807, -44.1384901 -19.9541928, -44.1385563 -19.9543859, -44.1386656 -19.9545781, -44.1387339 -19.9546889, -44.1389219 -19.9548661, -44.1391695 -19.9550384, -44.1393672 -19.9551414, -44.1397538 -19.9552208, -44.1401714 -19.9552332, -44.1405656 -19.9551143, -44.1406198 -19.9550853, -44.1407579 -19.9550224, -44.1409029 -19.9549201, -44.1410283 -19.9548257, -44.1413902 -19.9544132, -44.141835 -19.9539274, -44.142268 -19.953484, -44.1427036 -19.9531023, -44.1436229 -19.952259, -44.1437568 -19.9521565, -44.1441783 -19.9517273, -44.144644 -19.9512109, -44.1452538 -19.9505663, -44.1453541 -19.9504774, -44.1458653 -19.9500442, -44.1463563 -19.9496473, -44.1467534 -19.9492812, -44.1470553 -19.9490028, -44.1475804 -19.9485293, -44.1479838 -19.9482096, -44.1485003 -19.9478532, -44.1489451 -19.9477314, -44.1492225 -19.9477024, -44.149453 -19.9476684, -44.149694 -19.9476387, -44.1499556 -19.9475436, -44.1501398 -19.9474234, -44.1502723 -19.9473206, -44.150421 -19.9471473, -44.1505043 -19.9470004, -44.1507664 -19.9462594, -44.150867 -19.9459518, -44.1509225 -19.9457843, -44.1511168 -19.945466, -44.1513601 -19.9452272, -44.1516846 -19.944999, -44.15197 -19.9448738, -44.1525994 -19.9447263, -44.1536614 -19.9444791, -44.1544071 -19.9442671, -44.1548978 -19.9441275, -44.1556247 -19.9438304, -44.1565996 -19.9434083, -44.1570351 -19.9432556, -44.1573142 -19.9432091, -44.1575332 -19.9431645, -44.157931 -19.9431484, -44.1586408 -19.9431504, -44.1593575 -19.9431457, -44.1596498 -19.9431562, -44.1600991 -19.9431475, -44.1602331 -19.9431567, -44.1607926 -19.9432449, -44.1609723 -19.9432499, -44.16176186699087 -19.94326480383335, -44.1623815 -19.9432765, -44.1628299 -19.9433645, -44.1632475 -19.9435839, -44.1633456 -19.9436559, -44.1636261 -19.9439375, -44.1638186 -19.9442439, -44.1642535 -19.9451781, -44.165178 -19.947156, -44.1652928 -19.9474016, -44.1653074 -19.9474329, -44.1654026 -19.947766, -44.1654774 -19.9481718, -44.1655699 -19.9490241, -44.1656196 -19.9491538, -44.1659735 -19.9499097, -44.1662485 -19.9504925, -44.1662996 -19.9506347, -44.1663574 -19.9512961, -44.1664094 -19.9519273, -44.1664144 -19.9519881, -44.1664799 -19.9526399, -44.1666965 -19.9532586, -44.1671191 -19.9544126, -44.1672019 -19.9545869, -44.1673344 -19.9547603, -44.1675958 -19.9550466, -44.1692349 -19.9567775, -44.1694607 -19.9569284, -44.1718843 -19.9574147, -44.1719167 -19.9574206, -44.1721627 -19.9574748, -44.1723207 -19.9575386, -44.1724439 -19.9575883, -44.1742798 -19.9583293, -44.1748841 -19.9585688, -44.1751118 -19.9586796, -44.1752554 -19.9587769, -44.1752644 -19.9587881, -44.1756052 -19.9592143, -44.1766415 -19.9602689, -44.1774912 -19.9612387, -44.177663 -19.961364, -44.177856 -19.9614494, -44.178034 -19.9615125, -44.1782475 -19.9615423, -44.1785115 -19.9615155, -44.1795404 -19.9610879, -44.1796393 -19.9610759, -44.1798873 -19.9610459, -44.1802404 -19.961036, -44.1804714 -19.9609634, -44.181059 -19.9605365, -44.1815113 -19.9602333, -44.1826712 -19.9594067, -44.1829715 -19.9592551, -44.1837201 -19.9590611, -44.1839277 -19.9590073, -44.1853022 -19.9586512, -44.1856812 -19.9585316, -44.1862915 -19.9584212, -44.1866215 -19.9583494, -44.1867651 -19.9583391, -44.1868852 -19.9583372, -44.1872523 -19.9583313, -44.187823 -19.9583281, -44.1884457 -19.958351, -44.1889559 -19.958437, -44.1893825 -19.9585816, -44.1897582 -19.9587828, -44.1901186 -19.9590453, -44.1912457 -19.9602029, -44.1916575 -19.9606307, -44.1921624 -19.9611588, -44.1925367 -19.9615872, -44.1931832 -19.9622566, -44.1938468 -19.9629343, -44.194089 -19.9631996, -44.1943924 -19.9634141, -44.1946006 -19.9635104, -44.1948789 -19.963599, -44.1957402 -19.9637569, -44.1964094 -19.9638505, -44.1965875 -19.9639188, -44.1967865 -19.9640801, -44.197096 -19.9643572, -44.1972765 -19.964458, -44.1974407 -19.9644824, -44.1976234 -19.9644668, -44.1977654 -19.9644282, -44.1980715 -19.96417, -44.1984541 -19.9638069, -44.1986632 -19.9636002, -44.1988132 -19.9634172, -44.1989542 -19.9632962, -44.1991349 -19.9631081)");
}
/**
 * Serializes a {@link ValueAndTimestamp} wrapper by delegating to the
 * timestamp-aware overload; a {@code null} wrapper serializes to {@code null}.
 */
@Override
public byte[] serialize(final String topic, final ValueAndTimestamp<V> data) {
    return data == null
            ? null
            : serialize(topic, data.value(), data.timestamp());
}
/**
 * Round-trip check: a non-null value wrapped with a timestamp must serialize
 * to a non-null payload and deserialize back to an equal wrapper.
 */
@Test
public void shouldSerializeNonNullDataUsingTheInternalSerializer() {
    final ValueAndTimestamp<String> original = ValueAndTimestamp.make("some-string", TIMESTAMP);

    final byte[] bytes = STRING_SERDE.serializer().serialize(TOPIC, original);
    assertThat(bytes, is(notNullValue()));

    final ValueAndTimestamp<String> roundTripped =
            STRING_SERDE.deserializer().deserialize(TOPIC, bytes);
    assertThat(roundTripped, is(original));
}
/**
 * Grants {@code peer}'s vote to every pending ballot in
 * [max(pendingIndex, firstLogIndex), lastLogIndex] and advances the committed
 * index past every entry that has now reached quorum.
 *
 * @param firstLogIndex first log index acknowledged by the peer
 * @param lastLogIndex  last log index acknowledged by the peer (inclusive)
 * @param peer          the acknowledging peer
 * @return {@code false} only when nothing is pending ({@code pendingIndex == 0});
 *         {@code true} otherwise
 * @throws ArrayIndexOutOfBoundsException if {@code lastLogIndex} lies beyond the
 *         pending ballot queue
 */
public boolean commitAt(final long firstLogIndex, final long lastLogIndex, final PeerId peer) {
    // TODO use lock-free algorithm here?
    final long stamp = this.stampedLock.writeLock();
    long lastCommittedIndex = 0;
    try {
        if (this.pendingIndex == 0) {
            return false;
        }
        // Entirely below the pending window: those entries were handled already.
        if (lastLogIndex < this.pendingIndex) {
            return true;
        }
        if (lastLogIndex >= this.pendingIndex + this.pendingMetaQueue.size()) {
            throw new ArrayIndexOutOfBoundsException();
        }
        final long startAt = Math.max(this.pendingIndex, firstLogIndex);
        // Position hint is threaded through grant() calls to speed up peer lookup.
        Ballot.PosHint hint = new Ballot.PosHint();
        for (long logIndex = startAt; logIndex <= lastLogIndex; logIndex++) {
            final Ballot bl = this.pendingMetaQueue.get((int) (logIndex - this.pendingIndex));
            hint = bl.grant(peer, hint);
            if (bl.isGranted()) {
                lastCommittedIndex = logIndex;
            }
        }
        // No ballot reached quorum with this grant: nothing to commit yet.
        if (lastCommittedIndex == 0) {
            return true;
        }
        // When removing a peer off the raft group which contains even number of
        // peers, the quorum would decrease by 1, e.g. 3 of 4 changes to 2 of 3. In
        // this case, the log after removal may be committed before some previous
        // logs, since we use the new configuration to deal the quorum of the
        // removal request, we think it's safe to commit all the uncommitted
        // previous logs, which is not well proved right now
        this.pendingMetaQueue.removeFromFirst((int) (lastCommittedIndex - this.pendingIndex) + 1);
        LOG.debug("Node {} committed log fromIndex={}, toIndex={}.", this.opts.getNodeId(), this.pendingIndex, lastCommittedIndex);
        this.pendingIndex = lastCommittedIndex + 1;
        this.lastCommittedIndex = lastCommittedIndex;
    } finally {
        this.stampedLock.unlockWrite(stamp);
    }
    // The waiter is notified outside the write lock (presumably to avoid running
    // the callback while holding the lock — TODO confirm).
    this.waiter.onCommitted(lastCommittedIndex);
    return true;
}
/**
 * Walks commitAt through its lifecycle: no pending ballots, an out-of-range
 * index, a first grant that does not reach quorum, then a second grant that does.
 */
@Test
public void testCommitAt() {
    // No pending ballots yet -> commitAt reports false.
    assertFalse(this.box.commitAt(1, 3, new PeerId("localhost", 8081)));
    assertTrue(box.resetPendingIndex(1));
    assertTrue(this.box.appendPendingTask(
        JRaftUtils.getConfiguration("localhost:8081,localhost:8082,localhost:8083"),
        JRaftUtils.getConfiguration("localhost:8081"), new Closure() {

            @Override
            public void run(Status status) {

            }
        }));
    assertEquals(0, this.box.getLastCommittedIndex());
    // Only one task is pending, so committing up to index 3 is out of range.
    try {
        this.box.commitAt(1, 3, new PeerId("localhost", 8081));
        fail();
    } catch (ArrayIndexOutOfBoundsException e) {
    }
    // First grant: quorum not yet reached, nothing committed.
    assertTrue(this.box.commitAt(1, 1, new PeerId("localhost", 8081)));
    assertEquals(0, this.box.getLastCommittedIndex());
    assertEquals(1, this.box.getPendingIndex());
    // Second grant reaches quorum: index 1 commits, pending index advances to 2.
    assertTrue(this.box.commitAt(1, 1, new PeerId("localhost", 8082)));
    assertEquals(1, this.box.getLastCommittedIndex());
    assertEquals(2, this.box.getPendingIndex());
    Mockito.verify(this.waiter, Mockito.only()).onCommitted(1);
}
/**
 * Creates an unconfigured {@link Write} transform; callers supply the
 * configuration and table id via the {@code with*} builder methods.
 */
public static Write write() {
    // The Configuration is deliberately absent here — withConfiguration() provides it.
    return new Write(null, "");
}
/**
 * Writing to a table that does not exist must fail during pipeline
 * construction with an IllegalArgumentException naming the table.
 * The ExpectedException rule must be armed before the apply() that throws.
 */
@Test
public void testWritingFailsTableDoesNotExist() {
    final String table = tmpTable.getName();

    // Exception will be thrown by write.expand() when writeToDynamic is applied.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage(String.format("Table %s does not exist", table));

    p.apply(Create.empty(HBaseMutationCoder.of()))
        .apply("write", HBaseIO.write().withConfiguration(conf).withTableId(table));
}
/**
 * Creates an independent copy of this configuration.  Scalar key references are
 * shared (Strings are immutable) while the key maps are copied, so mutating
 * this instance afterwards does not leak into the clone.
 */
@Override
public DefaultCryptoKeyReaderConfigurationData clone() {
    final DefaultCryptoKeyReaderConfigurationData copy = new DefaultCryptoKeyReaderConfigurationData();

    if (defaultPublicKey != null) {
        copy.setDefaultPublicKey(defaultPublicKey);
    }
    if (defaultPrivateKey != null) {
        copy.setDefaultPrivateKey(defaultPrivateKey);
    }
    if (publicKeys != null) {
        copy.setPublicKeys(new HashMap<>(publicKeys));
    }
    if (privateKeys != null) {
        copy.setPrivateKeys(new HashMap<>(privateKeys));
    }

    return copy;
}
/**
 * The clone must hold deep-copied key maps and independent scalar fields:
 * mutating the source after cloning must not be visible through the clone.
 */
@Test
public void testClone() throws Exception {
    final DefaultCryptoKeyReaderConfigurationData original = new DefaultCryptoKeyReaderConfigurationData();
    original.setDefaultPublicKey("file:///path/to/default-public.key");
    original.setDefaultPrivateKey("file:///path/to/default-private.key");
    original.setPublicKey("key1", "file:///path/to/public1.key");
    original.setPrivateKey("key2", "file:///path/to/private2.key");

    final DefaultCryptoKeyReaderConfigurationData copy = original.clone();

    // Mutate the original; the copy must be unaffected.
    original.setDefaultPublicKey("data:AAAAA");
    original.setDefaultPrivateKey("data:BBBBB");
    original.setPublicKey("key1", "data:CCCCC");
    original.setPrivateKey("key2", "data:DDDDD");

    assertEquals(copy.getDefaultPublicKey(), "file:///path/to/default-public.key");
    assertEquals(copy.getDefaultPrivateKey(), "file:///path/to/default-private.key");
    assertEquals(copy.getPublicKeys().get("key1"), "file:///path/to/public1.key");
    assertEquals(copy.getPrivateKeys().get("key2"), "file:///path/to/private2.key");
}
/**
 * Drains buffered completed fetches into a single {@link Fetch}, returning at
 * most {@code fetchConfig.maxPollRecords} records.  Fetches for paused
 * partitions are set aside and re-queued at the end; a {@link KafkaException}
 * is deferred when records were already collected so the data is delivered
 * first and the error surfaces on a later call.
 */
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
    final Fetch<K, V> fetch = Fetch.empty();
    final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
    int recordsRemaining = fetchConfig.maxPollRecords;

    try {
        while (recordsRemaining > 0) {
            final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();

            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                // Promote the next queued fetch into the next-in-line slot.
                final CompletedFetch completedFetch = fetchBuffer.peek();

                if (completedFetch == null)
                    break;

                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // Remove a completedFetch upon a parse with exception if (1) it contains no completedFetch, and
                        // (2) there are no fetched completedFetch with actual content preceding this exception.
                        // The first condition ensures that the completedFetches is not stuck with the same completedFetch
                        // in cases such as the TopicAuthorizationException, and the second condition ensures that no
                        // potential data loss due to an exception in a following record.
                        if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
                            fetchBuffer.poll();

                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }

                fetchBuffer.poll();
            } else if (subscriptions.isPaused(nextInLineFetch.partition)) {
                // when the partition is paused we add the records back to the completedFetches queue instead of draining
                // them so that they can be returned on a subsequent poll if the partition is resumed at that time
                log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
                pausedCompletedFetches.add(nextInLineFetch);
                fetchBuffer.setNextInLineFetch(null);
            } else {
                final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
                recordsRemaining -= nextFetch.numRecords();
                fetch.add(nextFetch);
            }
        }
    } catch (KafkaException e) {
        // Only propagate when nothing was collected; otherwise return the data first.
        if (fetch.isEmpty())
            throw e;
    } finally {
        // add any polled completed fetches for paused partitions back to the completed fetches queue to be
        // re-evaluated in the next poll
        fetchBuffer.addAll(pausedCompletedFetches);
    }

    return fetch;
}
/**
 * A next-in-line CompletedFetch for a paused partition must yield zero records;
 * the collector re-queues it onto the FetchBuffer and clears the next-in-line slot.
 */
@Test
public void testFetchingPausedPartitionsYieldsNoRecords() {
    buildDependencies();
    assignAndSeek(topicAPartition0);

    // The partition should not be 'paused' in the SubscriptionState until we explicitly tell it to.
    assertFalse(subscriptions.isPaused(topicAPartition0));
    subscriptions.pause(topicAPartition0);
    assertTrue(subscriptions.isPaused(topicAPartition0));

    CompletedFetch completedFetch = completedFetchBuilder.build();

    // Set the CompletedFetch to the next-in-line fetch, *not* the queue.
    fetchBuffer.setNextInLineFetch(completedFetch);

    // The next-in-line CompletedFetch should reference the same object that was just created
    assertSame(fetchBuffer.nextInLineFetch(), completedFetch);

    // The FetchBuffer queue should be empty as the CompletedFetch was added to the next-in-line.
    // CompletedFetch, not the queue.
    assertTrue(fetchBuffer.isEmpty());

    // Ensure that the partition for the next-in-line CompletedFetch is still 'paused'.
    assertTrue(subscriptions.isPaused(completedFetch.partition));

    Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);

    // There should be no records in the Fetch as the partition being fetched is 'paused'.
    assertEquals(0, fetch.numRecords());

    // The FetchBuffer queue should not be empty; the CompletedFetch is added to the FetchBuffer queue by
    // the FetchCollector when it detects a 'paused' partition.
    assertFalse(fetchBuffer.isEmpty());

    // The next-in-line CompletedFetch should be null; the CompletedFetch is added to the FetchBuffer
    // queue by the FetchCollector when it detects a 'paused' partition.
    assertNull(fetchBuffer.nextInLineFetch());
}
/**
 * Prepares fetch requests for all fetchable partitions and sends them.
 * The success/failure handlers re-acquire this Fetcher's monitor before
 * touching shared state, because they run from response callbacks rather
 * than the caller's thread.
 *
 * @return the number of fetch requests that were sent
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
/**
 * Preferred read replica handling: initially the leader (-1) is selected; a
 * fetch response advertising replica 1 switches subsequent fetches to it; a
 * response advertising a replica missing from metadata (2) reverts to the leader.
 */
@Test
public void testPreferredReadReplica() {
    buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());

    subscriptions.assignFromUser(singleton(tp0));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
    subscriptions.seek(tp0, 0);

    // Take note of the preferred replica before the first fetch response
    Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(-1, selected.id());

    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // Set preferred read replica to node=1
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
    assertTrue(partitionRecords.containsKey(tp0));

    // Verify
    selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(1, selected.id());

    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // Set preferred read replica to node=2, which isn't in our metadata, should revert to leader
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(2)));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();
    selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    assertEquals(-1, selected.id());
}
@Restricted(NoExternalUse.class) public static Icon tryGetIcon(String iconGuess) { // Jenkins Symbols don't have metadata so return null if (iconGuess == null || iconGuess.startsWith("symbol-")) { return null; } Icon iconMetadata = IconSet.icons.getIconByClassSpec(iconGuess); // `iconGuess` must be class names if it contains a whitespace. // It may contains extra css classes unrelated to icons. // Filter classes with `icon-` prefix. if (iconMetadata == null && iconGuess.contains(" ")) { iconMetadata = IconSet.icons.getIconByClassSpec(filterIconNameClasses(iconGuess)); } if (iconMetadata == null) { // Icon could be provided as a simple iconFileName e.g. "help.svg" iconMetadata = IconSet.icons.getIconByClassSpec(IconSet.toNormalizedIconNameClass(iconGuess) + " icon-md"); } if (iconMetadata == null) { // Icon could be provided as an absolute iconFileName e.g. "/plugin/foo/abc.png" iconMetadata = IconSet.icons.getIconByUrl(iconGuess); } return iconMetadata; }
/** A relative icon URL such as "48x48/green.gif" must resolve to icon metadata. */
@Test
public void tryGetIcon_shouldReturnMetadataForUrl() throws Exception {
    final Icon resolved = Functions.tryGetIcon("48x48/green.gif");
    assertThat(resolved, is(not(nullValue())));
}
@Override @DSTransactional // 多数据源,使用 @DSTransactional 保证本地事务,以及数据源的切换 public void updateTenant(TenantSaveReqVO updateReqVO) { // 校验存在 TenantDO tenant = validateUpdateTenant(updateReqVO.getId()); // 校验租户名称是否重复 validTenantNameDuplicate(updateReqVO.getName(), updateReqVO.getId()); // 校验租户域名是否重复 validTenantWebsiteDuplicate(updateReqVO.getWebsite(), updateReqVO.getId()); // 校验套餐被禁用 TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(updateReqVO.getPackageId()); // 更新租户 TenantDO updateObj = BeanUtils.toBean(updateReqVO, TenantDO.class); tenantMapper.updateById(updateObj); // 如果套餐发生变化,则修改其角色的权限 if (ObjectUtil.notEqual(tenant.getPackageId(), updateReqVO.getPackageId())) { updateTenantRoleMenu(tenant.getId(), tenantPackage.getMenuIds()); } }
/** Updating the built-in system tenant must be rejected with TENANT_CAN_NOT_UPDATE_SYSTEM. */
@Test
public void testUpdateTenant_system() {
    // mock data
    TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM));
    tenantMapper.insert(dbTenant);// @Sql: insert an existing row first
    // prepare parameters
    TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
        o.setId(dbTenant.getId()); // set the ID being updated
    });
    // invoke and assert the expected business exception
    assertServiceException(() -> tenantService.updateTenant(reqVO), TENANT_CAN_NOT_UPDATE_SYSTEM);
}
/**
 * Returns the largest recorded value, or 0 when the snapshot is empty.
 * Reads the last element of {@code values} (assumed sorted ascending — TODO confirm).
 */
@Override
public long getMax() {
    final int count = values.length;
    return count == 0 ? 0 : values[count - 1];
}
/** The fixture snapshot's largest value is 5; getMax() must report it. */
@Test
public void calculatesTheMaximumValue() {
    final long maximum = snapshot.getMax();
    assertThat(maximum).isEqualTo(5);
}
/**
 * Returns the cached POJO for {@code ordinal}.
 *
 * @throws IllegalStateException if the provider was detached/uninitialized or
 *         the ordinal lies beyond the cache
 */
@Override
public T getHollowObject(int ordinal) {
    // Snapshot the field once so a concurrent detach cannot null it mid-method.
    final List<T> items = cachedItems;
    if (items == null) {
        final String typeName = typeReadState == null ? null : typeReadState.getSchema().getName();
        throw new IllegalStateException(String.format(
                "HollowObjectCacheProvider for type %s has been detached or was not initialized", typeName));
    }
    if (ordinal >= items.size()) {
        throw new IllegalStateException(String.format(
                "Ordinal %s is out of bounds for pojo cache array of size %s.", ordinal, items.size()));
    }
    return items.get(ordinal);
}
/**
 * Objects are cached by ordinal; an ordinal below the first added object is an
 * unpopulated gap resolving to null, while the object's own ordinal resolves to it.
 */
@Test
public void adding_withOrdinalGaps() {
    TypeA a = typeA(1);
    notifyAdded(a);
    // ordinal 0 was never populated -> null slot
    assertNull(subject.get().getHollowObject(0));
    assertEquals(a, subject.get().getHollowObject(a.ordinal));
}
/**
 * Returns the client address for a request, honouring {@code X-Forwarded-For}
 * only when the direct peer is a known proxy.  Falls back to the peer address
 * when the header is absent or its first entry is empty.
 */
public static String getRemoteAddr(HttpServletRequest request) {
    final String directAddr = request.getRemoteAddr();
    final String forwardedFor = request.getHeader("X-Forwarded-For");

    // Never trust the header unless the direct peer is a registered proxy.
    if (forwardedFor == null || !ProxyServers.isProxyServer(directAddr)) {
        return directAddr;
    }

    // The first comma-separated entry is the original client.
    final String clientAddr = forwardedFor.split(",")[0].trim();
    return clientAddr.isEmpty() ? directAddr : clientAddr;
}
/**
 * When the peer is a trusted proxy but X-Forwarded-For carries no usable client
 * entry (null or empty header), the proxy's own address must be returned.
 */
@Test
public void testRemoteAddrWithTrustedProxyAndEmptyClient() {
    assertEquals(proxyAddr, getRemoteAddr(null, proxyAddr, true));
    assertEquals(proxyAddr, getRemoteAddr("", proxyAddr, true));
}
@Override public Object invoke(MethodInvocation methodInvocation) throws Throwable { // 入栈 DataPermission dataPermission = this.findAnnotation(methodInvocation); if (dataPermission != null) { DataPermissionContextHolder.add(dataPermission); } try { // 执行逻辑 return methodInvocation.proceed(); } finally { // 出栈 if (dataPermission != null) { DataPermissionContextHolder.remove(); } } }
@Test // 在 Class 上有 @DataPermission 注解 public void testInvoke_class() throws Throwable { // 参数 mockMethodInvocation(TestClass.class); // 调用 Object result = interceptor.invoke(methodInvocation); // 断言 assertEquals("class", result); assertEquals(1, interceptor.getDataPermissionCache().size()); assertFalse(CollUtil.getFirst(interceptor.getDataPermissionCache().values()).enable()); }
/**
 * Reconciles the owned task set with a new assignment: tasks already owned are
 * reused, tasks whose active/standby role flipped are recycled, tasks no longer
 * assigned are closed, and whatever remains is created.  Task-level failures
 * are collected and rethrown in one batch after bookkeeping completes.
 *
 * @param activeTasks  new active tasks and their input partitions
 * @param standbyTasks new standby tasks and their input partitions
 */
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
                             final Map<TaskId, Set<TopicPartition>> standbyTasks) {
    log.info("Handle new assignment with:\n" +
            "\tNew active tasks: {}\n" +
            "\tNew standby tasks: {}\n" +
            "\tExisting active tasks: {}\n" +
            "\tExisting standby tasks: {}",
        activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());

    topologyMetadata.addSubscribedTopicsFromAssignment(
        activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        logPrefix
    );

    // Mutable working copies: entries are removed as existing tasks absorb them.
    final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
    final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
    final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
    final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));

    // Only lock tasks that appear in both the current and the new assignment.
    final Set<TaskId> tasksToLock =
        tasks.allTaskIds().stream()
            .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
            .collect(Collectors.toSet());
    maybeLockTasks(tasksToLock);

    // first put aside those unrecognized tasks because of unknown named-topologies
    tasks.clearPendingTasksToCreate();
    tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
    tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));

    // first rectify all existing tasks:
    // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
    // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
    // 3. otherwise, close them since they are no longer owned
    final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
    if (stateUpdater == null) {
        handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
    } else {
        handleTasksWithStateUpdater(
            activeTasksToCreate,
            standbyTasksToCreate,
            tasksToRecycle,
            tasksToCloseClean,
            failedTasks
        );
        failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
    }

    final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);

    maybeUnlockTasks(tasksToLock);

    // Throw accumulated failures (if any) before creating new tasks.
    failedTasks.putAll(taskCloseExceptions);
    maybeThrowTaskExceptions(failedTasks);

    createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
/**
 * An active restoring task absent from the new (empty) assignment must be
 * removed from the state updater, suspended, closed cleanly, and have its
 * producer cleaned up before (empty) task creation proceeds.
 */
@Test
public void shouldRemoveUnusedActiveTaskFromStateUpdaterAndCloseCleanly() {
    final StreamTask activeTaskToClose = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
    when(stateUpdater.getTasks()).thenReturn(mkSet(activeTaskToClose));
    // The state updater's removal completes successfully before handleAssignment returns.
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
    when(stateUpdater.remove(activeTaskToClose.id())).thenReturn(future);
    future.complete(new StateUpdater.RemovedTaskResult(activeTaskToClose));

    taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());

    verify(activeTaskToClose).suspend();
    verify(activeTaskToClose).closeClean();
    verify(activeTaskCreator).closeAndRemoveTaskProducerIfNeeded(activeTaskToClose.id());
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
/**
 * REST endpoint: creates or updates a comment on the given alarm.  The alarm id
 * from the path always wins over any alarmId supplied in the request body.
 */
@ApiOperation(value = "Create or update Alarm Comment ",
        notes = "Creates or Updates the Alarm Comment. " +
                "When creating comment, platform generates Alarm Comment Id as " + UUID_WIKI_LINK +
                "The newly created Alarm Comment id will be present in the response. Specify existing Alarm Comment id to update the alarm. " +
                "Referencing non-existing Alarm Comment Id will cause 'Not Found' error. " +
                "\n\n To create new Alarm comment entity it is enough to specify 'comment' json element with 'text' node, for example: {\"comment\": { \"text\": \"my comment\"}}. " +
                "\n\n If comment type is not specified the default value 'OTHER' will be saved. If 'alarmId' or 'userId' specified in body it will be ignored." +
                TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/alarm/{alarmId}/comment", method = RequestMethod.POST)
@ResponseBody
public AlarmComment saveAlarmComment(@Parameter(description = ALARM_ID_PARAM_DESCRIPTION) @PathVariable(ALARM_ID) String strAlarmId,
                                     @io.swagger.v3.oas.annotations.parameters.RequestBody(description = "A JSON value representing the comment.")
                                     @RequestBody AlarmComment alarmComment) throws ThingsboardException {
    checkParameter(ALARM_ID, strAlarmId);
    AlarmId alarmId = new AlarmId(toUUID(strAlarmId));
    // WRITE-permission check also verifies the alarm exists for the current user.
    Alarm alarm = checkAlarmInfoId(alarmId, Operation.WRITE);
    // Path variable overrides any alarm id carried in the body.
    alarmComment.setAlarmId(alarmId);
    return tbAlarmCommentService.saveAlarmComment(alarm, alarmComment, getCurrentUser());
}
/**
 * Saving a comment with an existing id updates it in place and marks it edited
 * ('edited' flag and 'editedOn' timestamp present); the update is audit-logged.
 */
@Test
public void testUpdateAlarmViaTenant() throws Exception {
    loginTenantAdmin();
    AlarmComment savedComment = createAlarmComment(alarm.getId());

    // reset invocation counters so verification only sees the update
    Mockito.reset(tbClusterService, auditLogService);

    JsonNode newComment = JacksonUtil.newObjectNode().set("text", new TextNode("Updated comment"));
    savedComment.setComment(newComment);
    AlarmComment updatedAlarmComment = saveAlarmComment(alarm.getId(), savedComment);

    Assert.assertNotNull(updatedAlarmComment);
    Assert.assertEquals(newComment.get("text"), updatedAlarmComment.getComment().get("text"));
    Assert.assertEquals("true", updatedAlarmComment.getComment().get("edited").asText());
    Assert.assertNotNull(updatedAlarmComment.getComment().get("editedOn"));

    testLogEntityActionEntityEqClass(alarm, alarm.getId(), tenantId, customerId, tenantAdminUserId, TENANT_ADMIN_EMAIL,
            ActionType.UPDATED_COMMENT, 1, updatedAlarmComment);
}
/**
 * Replaces this counter's state with the state persisted on disk, then merges
 * back any requests/errors that were recorded in memory before the read.
 *
 * @throws IOException if the storage file cannot be read
 */
void readFromFile() throws IOException {
    final Counter counter = new CounterStorage(this).readFromFile();
    if (counter != null) {
        // snapshot of the in-memory state taken before it is overwritten below
        final Counter newCounter = clone();
        startDate = counter.getStartDate();
        requests.clear();
        for (final CounterRequest request : counter.getRequests()) {
            requests.put(request.getName(), request);
        }
        if (errors != null) {
            errors.clear();
            errors.addAll(counter.getErrors());
        }
        // re-add the new requests recorded before reading the file
        // (e.g. the first requests collected by the collect server during initialization)
        addRequestsAndErrors(newCounter);
    }
}
/**
 * Round-trips counter state through the storage file: reading merges persisted
 * requests into the current ones (hits double), and a cleared counter restores
 * the persisted request exactly.
 */
@Test
public void testReadFromFile() throws IOException {
    // case: empty counter
    counter.clear();
    counter.writeToFile();
    counter.readFromFile();
    // case: non-empty counter
    final CounterRequest counterRequest = createCounterRequest();
    counter.addHits(counterRequest);
    counter.writeToFile();
    // readFromFile adds the requests read from the file to the current ones
    counter.readFromFile();
    assertEquals("request hits", counterRequest.getHits() * 2, counter.getRequests().get(0).getHits());
    counter.clear();
    counter.readFromFile();
    assertEquals("request", counterRequest.toString(), counter.getRequests().get(0).toString());
}
/**
 * Generates encrypt-rewrite tokens for an ALTER TABLE statement: ADD, MODIFY
 * and CHANGE column tokens are emitted as-is, while DROP column tokens are
 * merged into one statement for SQL Server (no parentheses) and Oracle
 * (parenthesized), and kept separate for every other dialect.
 */
@Override
public Collection<SQLToken> generateSQLTokens(final AlterTableStatementContext sqlStatementContext) {
    String tableName = sqlStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue();
    EncryptTable encryptTable = encryptRule.getEncryptTable(tableName);

    Collection<SQLToken> result = new LinkedList<>();
    result.addAll(getAddColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getAddColumnDefinitions()));
    result.addAll(getModifyColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getModifyColumnDefinitions()));
    result.addAll(getChangeColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getChangeColumnDefinitions()));

    List<SQLToken> dropColumnTokens = getDropColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getDropColumnDefinitions());
    String databaseType = sqlStatementContext.getDatabaseType().getType();
    switch (databaseType) {
        case "SQLServer":
            result.addAll(mergeDropColumnStatement(dropColumnTokens, "", ""));
            break;
        case "Oracle":
            result.addAll(mergeDropColumnStatement(dropColumnTokens, "(", ")"));
            break;
        default:
            result.addAll(dropColumnTokens);
    }
    return result;
}
// Modifying an encrypted column via ALTER TABLE is unsupported; token generation must fail fast.
@Test
void assertModifyEncryptColumnGenerateSQLTokens() {
    assertThrows(UnsupportedOperationException.class, () -> generator.generateSQLTokens(mockModifyColumnStatementContext()));
}
/**
 * Runs a previously prepared job: validates the pipeline, starts a new
 * {@link JobInvocation}, registers a terminal-state listener that cleans up the
 * staging directory, consumes the preparation, and replies with the invocation id.
 * NOT_FOUND is returned for an unknown preparation id, INVALID_ARGUMENT for a
 * pipeline that fails validation, and INTERNAL for anything unexpected.
 */
@Override
public void run(RunJobRequest request, StreamObserver<RunJobResponse> responseObserver) {
    LOG.trace("{} {}", RunJobRequest.class.getSimpleName(), request);

    String preparationId = request.getPreparationId();
    try {
        // retrieve job preparation
        JobPreparation preparation = preparations.get(preparationId);
        if (preparation == null) {
            String errMessage = String.format("Unknown Preparation Id \"%s\".", preparationId);
            StatusException exception = Status.NOT_FOUND.withDescription(errMessage).asException();
            responseObserver.onError(exception);
            return;
        }
        try {
            PipelineValidator.validate(preparation.pipeline());
        } catch (Exception e) {
            LOG.warn("Encountered Unexpected Exception during validation", e);
            responseObserver.onError(new StatusRuntimeException(Status.INVALID_ARGUMENT.withCause(e)));
            return;
        }

        // create new invocation
        JobInvocation invocation =
            invoker.invoke(
                resolveDependencies(preparation.pipeline(), stagingSessionTokens.get(preparationId)),
                preparation.options(),
                request.getRetrievalToken());
        String invocationId = invocation.getId();
        // On any terminal state, tear down the staging session for this preparation.
        invocation.addStateListener(
            event -> {
                if (!JobInvocation.isTerminated(event.getState())) {
                    return;
                }
                String stagingSessionToken = stagingSessionTokens.get(preparationId);
                stagingSessionTokens.remove(preparationId);
                try {
                    if (cleanupJobFn != null) {
                        cleanupJobFn.accept(stagingSessionToken);
                    }
                } catch (Exception e) {
                    LOG.warn(
                        "Failed to remove job staging directory for token {}.", stagingSessionToken, e);
                } finally {
                    onFinishedInvocationCleanup(invocationId);
                }
            });
        invocation.start();
        invocations.put(invocationId, invocation);
        // Cleanup this preparation because we are running it now.
        // If we fail, we need to prepare again.
        preparations.remove(preparationId);
        RunJobResponse response = RunJobResponse.newBuilder().setJobId(invocationId).build();
        responseObserver.onNext(response);
        responseObserver.onCompleted();
    } catch (StatusRuntimeException e) {
        LOG.warn("Encountered Status Exception", e);
        responseObserver.onError(e);
    } catch (Exception e) {
        String errMessage =
            String.format("Encountered Unexpected Exception for Preparation %s", preparationId);
        LOG.error(errMessage, e);
        responseObserver.onError(Status.INTERNAL.withCause(e).asException());
    }
}
/**
 * Happy path: run() on a valid preparation delegates to the JobInvoker with the
 * prepared pipeline/options/retrieval token, registers a state listener, starts
 * the invocation, and replies with its job id.
 */
@Test
public void testJobSubmissionUsesJobInvokerAndIsSuccess() throws Exception {
    JobApi.PrepareJobResponse prepareResponse = prepareJob();

    // run job
    JobApi.RunJobRequest runRequest =
        JobApi.RunJobRequest.newBuilder()
            .setPreparationId(prepareResponse.getPreparationId())
            .setRetrievalToken(TEST_RETRIEVAL_TOKEN)
            .build();
    RecordingObserver<JobApi.RunJobResponse> runRecorder = new RecordingObserver<>();
    service.run(runRequest, runRecorder);
    verify(invoker, times(1)).invoke(TEST_PIPELINE, TEST_OPTIONS, TEST_RETRIEVAL_TOKEN);

    assertThat(runRecorder.isSuccessful(), is(true));

    assertThat(runRecorder.values, hasSize(1));
    JobApi.RunJobResponse runResponse = runRecorder.values.get(0);
    assertThat(runResponse.getJobId(), is(TEST_JOB_ID));

    verify(invocation, times(1)).addStateListener(any());
    verify(invocation, times(1)).start();
}
/**
 * Resolves the partitioning strategy for {@code mapName} with this precedence:
 * attribute-based strategy (cached per map) &gt; strategy instance on the config
 * &gt; strategy class name on the config (instantiated once, then cached) &gt; null.
 */
@SuppressWarnings("checkstyle:NestedIfDepth")
@Nullable
public PartitioningStrategy getPartitioningStrategy(
        String mapName,
        PartitioningStrategyConfig config,
        final List<PartitioningAttributeConfig> attributeConfigs
) {
    // Attribute-based partitioning wins over everything else.
    if (attributeConfigs != null && !attributeConfigs.isEmpty()) {
        return cache.computeIfAbsent(mapName, k -> createAttributePartitionStrategy(attributeConfigs));
    }
    if (config != null && config.getPartitioningStrategy() != null) {
        return config.getPartitioningStrategy();
    }
    if (config != null && config.getPartitioningStrategyClass() != null) {
        PartitioningStrategy<?> strategy = cache.get(mapName);
        if (strategy != null) {
            return strategy;
        }
        try {
            // We don't use computeIfAbsent intentionally so that the map isn't blocked if the instantiation takes a
            // long time - it's user code
            strategy = ClassLoaderUtil.newInstance(configClassLoader, config.getPartitioningStrategyClass());
        } catch (Exception e) {
            throw ExceptionUtil.rethrow(e);
        }
        // putIfAbsent tolerates a concurrent racer; the first stored instance wins.
        // NOTE(review): the locally created instance is returned even when another
        // thread's instance won the race and is the one that stays cached.
        cache.putIfAbsent(mapName, strategy);
        return strategy;
    }
    return null;
}
/**
 * A strategy instance set directly on the config must be returned as-is
 * (same object), not wrapped, cached-and-copied, or re-instantiated.
 */
@Test
public void whenPartitioningStrategyDefined_getPartitioningStrategy_returnsSameInstance() {
    final PartitioningStrategy expected = new StringPartitioningStrategy();
    final PartitioningStrategyConfig config = new PartitioningStrategyConfig(expected);

    final PartitioningStrategy actual =
            partitioningStrategyFactory.getPartitioningStrategy(mapName, config, null);

    assertSame(expected, actual);
}
/**
 * Serializes as [recordContext][prior][old][new][endPadding], where each value
 * is a 4-byte length followed by its bytes.  Null values are written with the
 * NULL_VALUE_SENTINEL length; an oldValue equal to priorValue is collapsed to
 * OLD_PREV_DUPLICATE_VALUE_SENTINEL so the bytes are not stored twice.
 *
 * @param endPadding extra bytes reserved at the end of the buffer for the caller
 */
ByteBuffer serialize(final int endPadding) {
    final int sizeOfValueLength = Integer.BYTES;

    final int sizeOfPriorValue = priorValue == null ? 0 : priorValue.length;
    // Size uses reference equality (priorValue == oldValue) to skip the duplicate.
    final int sizeOfOldValue = oldValue == null || priorValue == oldValue ? 0 : oldValue.length;
    final int sizeOfNewValue = newValue == null ? 0 : newValue.length;

    final byte[] serializedContext = recordContext.serialize();

    final ByteBuffer buffer = ByteBuffer.allocate(
        serializedContext.length
            + sizeOfValueLength + sizeOfPriorValue
            + sizeOfValueLength + sizeOfOldValue
            + sizeOfValueLength + sizeOfNewValue
            + endPadding
    );
    buffer.put(serializedContext);
    addValue(buffer, priorValue);
    if (oldValue == null) {
        buffer.putInt(NULL_VALUE_SENTINEL);
    } else if (Arrays.equals(priorValue, oldValue)) {
        // NOTE(review): the size above was computed with reference equality but the
        // sentinel is written on content equality (Arrays.equals) — distinct arrays
        // with equal content leave sizeOfOldValue bytes unused at the buffer's end.
        buffer.putInt(OLD_PREV_DUPLICATE_VALUE_SENTINEL);
    } else {
        buffer.putInt(sizeOfOldValue);
        buffer.put(oldValue);
    }
    addValue(buffer, newValue);
    return buffer;
}
/**
 * With prior and old values absent, the bytes after the record context must be:
 * NULL sentinel (-1) for prior, NULL sentinel (-1) for old, then the
 * length-prefixed newValue.
 */
@Test
public void shouldSerializeNew() {
    final ProcessorRecordContext context = new ProcessorRecordContext(0L, 0L, 0, "topic", new RecordHeaders());
    final byte[] serializedContext = context.serialize();
    final byte[] newValue = {(byte) 5};
    final byte[] bytes = new BufferValue(null, null, newValue, context).serialize(0).array();
    // strip the record-context prefix; only the value section is under test
    final byte[] withoutContext = Arrays.copyOfRange(bytes, serializedContext.length, bytes.length);

    assertThat(withoutContext, is(ByteBuffer.allocate(Integer.BYTES * 3 + 1).putInt(-1).putInt(-1).putInt(1).put(newValue).array()));
}
/**
 * Combines two reporters into one.  Nulls and identical instances collapse to
 * a single reporter; otherwise both are flattened (composites expanded) into
 * an identity set and wrapped in a {@link CompositeMetricsReporter}.
 */
public static MetricsReporter combine(MetricsReporter first, MetricsReporter second) {
    if (first == null) {
        return second;
    }
    if (second == null || first == second) {
        return first;
    }

    final Set<MetricsReporter> reporters = Sets.newIdentityHashSet();
    addFlattened(reporters, first);
    addFlattened(reporters, second);
    return new CompositeMetricsReporter(reporters);
}

/** Adds {@code reporter} to {@code target}, expanding composite reporters into their members. */
private static void addFlattened(Set<MetricsReporter> target, MetricsReporter reporter) {
    if (reporter instanceof CompositeMetricsReporter) {
        target.addAll(((CompositeMetricsReporter) reporter).reporters());
    } else {
        target.add(reporter);
    }
}
/** Combining a reporter with itself must return it unchanged, not wrap it in a composite. */
@Test
public void combineSameInstances() {
    final MetricsReporter reporter = LoggingMetricsReporter.instance();
    final MetricsReporter combined = MetricsReporters.combine(reporter, reporter);
    assertThat(combined).isSameAs(reporter);
}
/**
 * Sends the paragraph text to the remote interpreter process and converts the
 * result back, merging the returned paragraph config and form (GUI) state into
 * the local context.
 *
 * @param st      the code/text to interpret
 * @param context paragraph execution context; its config and GUI are mutated in place
 * @return an ERROR result when the remote process is not running, otherwise the converted remote result
 * @throws InterpreterException if the interpreter process cannot be created
 */
@Override
public InterpreterResult interpret(final String st, final InterpreterContext context)
        throws InterpreterException {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("st:\n{}", st);
    }

    final FormType form = getFormType();
    RemoteInterpreterProcess interpreterProcess = null;
    try {
        interpreterProcess = getOrCreateInterpreterProcess();
    } catch (IOException e) {
        throw new InterpreterException(e);
    }
    if (!interpreterProcess.isRunning()) {
        return new InterpreterResult(InterpreterResult.Code.ERROR,
                "Interpreter process is not running\n" + interpreterProcess.getErrorMessage());
    }
    return interpreterProcess.callRemoteFunction(client -> {
        RemoteInterpreterResult remoteResult = client.interpret(
                sessionId, className, st, convert(context));
        Map<String, Object> remoteConfig = (Map<String, Object>) GSON.fromJson(
                remoteResult.getConfig(), new TypeToken<Map<String, Object>>() {
                }.getType());
        // The local paragraph config is fully replaced by whatever the remote returned.
        context.getConfig().clear();
        if (remoteConfig != null) {
            context.getConfig().putAll(remoteConfig);
        }
        GUI currentGUI = context.getGui();
        GUI currentNoteGUI = context.getNoteGui();
        if (form == FormType.NATIVE) {
            // NATIVE forms: remote GUI state fully replaces the paragraph GUI.
            GUI remoteGui = GUI.fromJson(remoteResult.getGui());
            GUI remoteNoteGui = GUI.fromJson(remoteResult.getNoteGui());
            currentGUI.clear();
            currentGUI.setParams(remoteGui.getParams());
            currentGUI.setForms(remoteGui.getForms());
            currentNoteGUI.setParams(remoteNoteGui.getParams());
            currentNoteGUI.setForms(remoteNoteGui.getForms());
        } else if (form == FormType.SIMPLE) {
            // SIMPLE forms: remote GUI state is merged into the existing GUI.
            final Map<String, Input> currentForms = currentGUI.getForms();
            final Map<String, Object> currentParams = currentGUI.getParams();
            final GUI remoteGUI = GUI.fromJson(remoteResult.getGui());
            final Map<String, Input> remoteForms = remoteGUI.getForms();
            final Map<String, Object> remoteParams = remoteGUI.getParams();
            currentForms.putAll(remoteForms);
            currentParams.putAll(remoteParams);
        }
        return convert(remoteResult);
    }
    );
}
/**
 * Verifies that an interpreter configured with an invalid precode returns an
 * ERROR result when interpreting a statement.
 */
@Test
void testExecuteIncorrectPrecode() throws TTransportException, IOException, InterpreterException {
  interpreterSetting.getOption().setPerUser(InterpreterOption.SHARED);
  // Precode that is not valid for SleepInterpreter, so interpretation must fail.
  interpreterSetting.setProperty("zeppelin.SleepInterpreter.precode", "fail test");
  Interpreter interpreter1 = interpreterSetting.getInterpreter("user1", note1Id, "sleep");
  // Removed stray empty statement (";;") after the initializer.
  InterpreterContext context1 = createDummyInterpreterContext();
  assertEquals(Code.ERROR, interpreter1.interpret("10", context1).code());
}
// Replaces the set of app ids discovered in local command files.
// EI_EXPOSE_REP2 is suppressed deliberately: the caller-supplied set is stored
// without a defensive copy. NOTE(review): callers must not mutate `ids` after
// passing it in — confirm this contract, or copy with Set.copyOf(ids).
@SuppressFBWarnings(value = "EI_EXPOSE_REP2")
public void setLocalCommandsQueryAppIds(final Set<String> ids) {
  this.localCommandsQueryAppIds = Optional.of(ids);
}
@Test public void shouldOnlyReturnTrueForPresentLocalCommands() { // Given: Set<String> localCommands = new HashSet<>(ALL_APP_IDS.subList(0, 3)); service.setLocalCommandsQueryAppIds(localCommands); // When: List<Boolean> results = ALL_APP_IDS.stream() .map(service::foundInLocalCommands) .collect(Collectors.toList()); // Then: results.subList(0, 3).forEach(result -> assertEquals(result, true)); results.subList(3, 5).forEach(result -> assertEquals(result, false)); }
/**
 * Encodes a Region as JSON: scalar fields, then the masters list (a list of
 * node-id sets) as an array of string arrays, then any annotations.
 */
@Override
public ObjectNode encode(Region region, CodecContext context) {
    checkNotNull(region, NULL_REGION_MSG);
    ObjectNode json = context.mapper().createObjectNode()
            .put(REGION_ID, region.id().toString())
            .put(NAME, region.name())
            .put(TYPE, region.type().toString());
    // Each master set becomes a nested JSON array of node-id strings.
    ArrayNode mastersArray = context.mapper().createArrayNode();
    for (Set<NodeId> nodeIds : region.masters()) {
        ArrayNode nodeArray = context.mapper().createArrayNode();
        for (NodeId nodeId : nodeIds) {
            nodeArray.add(nodeId.toString());
        }
        mastersArray.add(nodeArray);
    }
    json.set(MASTERS, mastersArray);
    return annotate(json, region, context);
}
// Encoding a fully-populated region (four master sets of increasing size)
// must produce JSON matching the region via the custom matcher.
@Test
public void testRegionEncode() {
  NodeId nodeId1 = NodeId.nodeId("1");
  NodeId nodeId2 = NodeId.nodeId("2");
  NodeId nodeId3 = NodeId.nodeId("3");
  NodeId nodeId4 = NodeId.nodeId("4");
  Set<NodeId> set1 = ImmutableSet.of(nodeId1);
  Set<NodeId> set2 = ImmutableSet.of(nodeId1, nodeId2);
  Set<NodeId> set3 = ImmutableSet.of(nodeId1, nodeId2, nodeId3);
  Set<NodeId> set4 = ImmutableSet.of(nodeId1, nodeId2, nodeId3, nodeId4);
  List<Set<NodeId>> masters = ImmutableList.of(set1, set2, set3, set4);
  RegionId regionId = RegionId.regionId("1");
  String name = "foo";
  Region.Type type = Region.Type.ROOM;
  Annotations noAnnots = DefaultAnnotations.EMPTY;
  Region region = new DefaultRegion(regionId, name, type, noAnnots, masters);
  ObjectNode regionJson = regionCodec.encode(region, context);
  assertThat(regionJson, matchesRegion(region));
}
// Delegates to the named variant with an empty name, so the key-selector
// processor node gets an auto-generated name.
@Override
public <KR> KStream<KR, V> selectKey(final KeyValueMapper<? super K, ? super V, ? extends KR> mapper) {
    return selectKey(mapper, NamedInternal.empty());
}
// A null mapper must be rejected up front with a descriptive NPE,
// even when a processor name is supplied.
@Test
public void shouldNotAllowNullMapperOnSelectKeyWithNamed() {
  final NullPointerException exception = assertThrows(
      NullPointerException.class,
      () -> testStream.selectKey(null, Named.as("keySelector")));
  assertThat(exception.getMessage(), equalTo("mapper can't be null"));
}
/**
 * Reads {@code metricKey} from the counter context and coerces it to a long.
 * A missing or NO_VALUE measure counts as 0; INT measures are widened.
 */
static long getLongMeasureValue(CounterInitializationContext counterContext, String metricKey) {
    Measure measure = counterContext.getMeasure(metricKey).orElse(DEFAULT_MEASURE_LONG);
    switch (measure.getValueType()) {
        case NO_VALUE:
            return 0L;
        case INT:
            return measure.getIntValue();
        default:
            return measure.getLongValue();
    }
}
// A LONG-typed measure must be returned unchanged.
@Test
void getLongMeasureValue_returns_value_if_measure_is_LONG() {
  fileAggregateContext.put(SOME_METRIC_KEY, newMeasureBuilder().create(152L));
  assertThat(getLongMeasureValue(fileAggregateContext, SOME_METRIC_KEY)).isEqualTo(152L);
}
static List<String> split(final String str, final char delimiterChar, final char escapeChar) throws IllegalArgumentException { if (str == null) { return null; } int len = str.length(); if (len == 0) { return Collections.emptyList(); } List<String> list = new ArrayList<String>(); // Keeps track of offset of the passed string. int offset = 0; // Indicates start offset from which characters will be copied from original // string to destination string. Resets when an escape or delimiter char is // encountered. int startOffset = 0; StringBuilder builder = new StringBuilder(len); // Iterate over the string till we reach the end. while (offset < len) { if (str.charAt(offset) == escapeChar) { // An escape character must be followed by a delimiter or escape char // but we have reached the end and have no further character to look at. if (offset + 1 >= len) { throw new IllegalArgumentException( "Escape char not properly escaped."); } char nextChar = str.charAt(offset + 1); // Next character must be a delimiter or an escape char. if (nextChar != escapeChar && nextChar != delimiterChar) { throw new IllegalArgumentException( "Escape char or delimiter char not properly escaped."); } // Copy contents from the offset where last escape or delimiter char was // encountered. if (startOffset < offset) { builder.append(str.substring(startOffset, offset)); } builder.append(nextChar); offset += 2; // Reset the start offset as an escape char has been encountered. startOffset = offset; continue; } else if (str.charAt(offset) == delimiterChar) { // A delimiter has been encountered without an escape character. // String needs to be split here. Copy remaining chars and add the // string to list. builder.append(str.substring(startOffset, offset)); list.add(builder.toString().trim()); // Reset the start offset as a delimiter has been encountered. startOffset = ++offset; builder = new StringBuilder(len - offset); continue; } offset++; } // Copy rest of the characters. 
if (!str.isEmpty()) { builder.append(str.substring(startOffset)); } // Add the last part of delimited string to list. list.add(builder.toString().trim()); return list; }
// Escaped delimiters ("*!") and escaped escape chars ("**") must be unescaped
// into the parts; trailing delimiters yield trailing empty parts.
@Test
void testSplitUsingEscapeAndDelimChar() throws Exception {
  List<String> list = TimelineReaderUtils.split("*!cluster!*!b**o***!xer!oozie**", '!', '*');
  String[] arr = new String[list.size()];
  arr = list.toArray(arr);
  assertArrayEquals(new String[]{"!cluster", "!b*o*!xer", "oozie*"}, arr);
  list = TimelineReaderUtils.split("*!cluster!*!b**o***!xer!!", '!', '*');
  arr = new String[list.size()];
  arr = list.toArray(arr);
  assertArrayEquals(new String[]{"!cluster", "!b*o*!xer", "", ""}, arr);
}
/**
 * Attempts to claim {@code key} within the tracked range. Claims must be
 * strictly increasing; the empty key is the special marker for the end of the
 * key space and always fails the claim (after being recorded as attempted).
 *
 * @return true if the key was claimed; false if it is the terminal empty key
 *     or lies at/beyond the range end
 */
@Override
public boolean tryClaim(ByteKey key) {
  // Handle claiming the end of range EMPTY key
  if (key.isEmpty()) {
    // The empty end-marker may only be attempted once.
    checkArgument(
        lastAttemptedKey == null || !lastAttemptedKey.isEmpty(),
        "Trying to claim key %s while last attempted key was %s",
        key, lastAttemptedKey);
    lastAttemptedKey = key;
    return false;
  }
  // Claims must be strictly increasing.
  checkArgument(
      lastAttemptedKey == null || key.compareTo(lastAttemptedKey) > 0,
      "Trying to claim key %s while last attempted key was %s",
      key, lastAttemptedKey);
  // Claims may not precede the range start.
  checkArgument(
      key.compareTo(range.getStartKey()) >= 0,
      "Trying to claim key %s before start of the range %s",
      key, range);
  lastAttemptedKey = key;
  // No respective checkArgument for i < range.to() - it's ok to try claiming keys beyond
  if (!range.getEndKey().isEmpty() && key.compareTo(range.getEndKey()) >= 0) {
    return false;
  }
  lastClaimedKey = key;
  return true;
}
// Monotonically increasing keys inside [0x10, 0xc0) succeed; claiming the end
// key fails, after which checkDone() must pass.
@Test
public void testTryClaim() throws Exception {
  ByteKeyRange range = ByteKeyRange.of(ByteKey.of(0x10), ByteKey.of(0xc0));
  ByteKeyRangeTracker tracker = ByteKeyRangeTracker.of(range);
  assertEquals(range, tracker.currentRestriction());
  assertTrue(tracker.tryClaim(ByteKey.of(0x10)));
  assertTrue(tracker.tryClaim(ByteKey.of(0x10, 0x00)));
  assertTrue(tracker.tryClaim(ByteKey.of(0x10, 0x00, 0x00)));
  assertTrue(tracker.tryClaim(ByteKey.of(0x50)));
  assertTrue(tracker.tryClaim(ByteKey.of(0x99)));
  assertFalse(tracker.tryClaim(ByteKey.of(0xc0)));
  tracker.checkDone();
}
/**
 * Compares the task executor's reported deployments against the expected set
 * and notifies the handler about unknown (reported but not expected) and
 * missing (expected non-pending but not reported) executions.
 */
@Override
public void reconcileExecutionDeployments(
        ResourceID taskExecutorHost,
        ExecutionDeploymentReport executionDeploymentReport,
        Map<ExecutionAttemptID, ExecutionDeploymentState> expectedDeployedExecutions) {
    // Start with everything the TM reported; each expected execution that was
    // reported is removed, so the leftovers are unknown to the JM.
    final Set<ExecutionAttemptID> unknownExecutions =
            new HashSet<>(executionDeploymentReport.getExecutions());
    final Set<ExecutionAttemptID> missingExecutions = new HashSet<>();
    expectedDeployedExecutions.forEach(
            (attemptId, deploymentState) -> {
                boolean reportedByTaskExecutor = unknownExecutions.remove(attemptId);
                // A non-pending execution the TM did not report is missing.
                if (!reportedByTaskExecutor && deploymentState != ExecutionDeploymentState.PENDING) {
                    missingExecutions.add(attemptId);
                }
            });
    if (!unknownExecutions.isEmpty()) {
        handler.onUnknownDeploymentsOf(unknownExecutions, taskExecutorHost);
    }
    if (!missingExecutions.isEmpty()) {
        handler.onMissingDeploymentsOf(missingExecutions, taskExecutorHost);
    }
}
// A deployment reported by the TM but absent from the expected set must be
// flagged as unknown, with no missing deployments.
@Test
void testUnknownDeployments() {
  TestingExecutionDeploymentReconciliationHandler handler =
      new TestingExecutionDeploymentReconciliationHandler();
  DefaultExecutionDeploymentReconciler reconciler = new DefaultExecutionDeploymentReconciler(handler);
  ResourceID resourceId = generate();
  ExecutionAttemptID attemptId = createExecutionAttemptId();
  reconciler.reconcileExecutionDeployments(
      resourceId,
      new ExecutionDeploymentReport(Collections.singleton(attemptId)),
      Collections.emptyMap());
  assertThat(handler.getMissingExecutions()).isEmpty();
  assertThat(handler.getUnknownExecutions()).contains(attemptId);
}
/**
 * Logs an outgoing HTTP request: method, (possibly masked) URI, headers and —
 * for text-based content types — the body. Binary or unknown content bodies
 * are skipped.
 */
public void logRequest(Config config, HttpRequest request) {
  requestCount++;
  String uri = request.getUrl();
  // Optional per-URI modifier can mask sensitive parts before logging.
  HttpLogModifier requestModifier = logModifier(config, uri);
  String maskedUri = requestModifier == null ? uri : requestModifier.uri(uri);
  StringBuilder sb = new StringBuilder();
  sb.append("request:\n").append(requestCount).append(" > ")
      .append(request.getMethod()).append(' ').append(maskedUri);
  logHeaders(requestCount, " > ", sb, requestModifier, request.getHeaders());
  ResourceType rt = ResourceType.fromContentType(request.getContentType());
  if (rt == null || rt.isBinary()) {
    // don't log body
  } else {
    byte[] body;
    if (rt == ResourceType.MULTIPART) {
      // multipart bodies are logged via their display form, not raw bytes
      body = request.getBodyForDisplay() == null ? null : request.getBodyForDisplay().getBytes();
    } else {
      body = request.getBody();
    }
    logBody(config, requestModifier, sb, uri, body, true, rt);
  }
  sb.append('\n');
  logger.debug("{}", sb);
}
// A turtle body with an explicit charset is a text type: both the body and the
// Content-Type header must appear in the log output.
@Test
void testRequestLoggingTurtleWithCharset() {
  HttpRequest httpRequest = httpRequestBuilder.body(TURTLE_SAMPLE)
      .contentType("text/turtle; charset=UTF-8").path("/ttl").build();
  httpLogger.logRequest(config, httpRequest);
  String logs = logAppender.collect();
  assertTrue(logs.contains(TURTLE_SAMPLE));
  assertTrue(logs.contains("Content-Type: text/turtle; charset=UTF-8"));
}
/**
 * Pretty-prints the given XML with two-space indentation. On any parse or
 * transformation failure the input is returned unchanged (best effort).
 *
 * @param xml the XML document to format
 * @return the indented document, or {@code xml} unchanged if it cannot be formatted
 */
public static String formatXml(String xml) {
    try {
        TransformerFactory factory = TransformerFactory.newInstance();
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        // Harden against XXE/SSRF in case the XML comes from an untrusted
        // source: forbid resolution of external DTDs and stylesheets.
        factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
        factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
        Transformer serializer = factory.newTransformer();
        serializer.setOutputProperty(OutputKeys.INDENT, "yes");
        serializer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
        Source xmlSource = new SAXSource(new InputSource(new StringReader(xml)));
        StringWriter stringWriter = new StringWriter();
        StreamResult res = new StreamResult(stringWriter);
        serializer.transform(xmlSource, res);
        return stringWriter.toString();
    } catch (Exception e) {
        // Formatting is best-effort; fall back to the raw input.
        return xml;
    }
}
// Nested elements must be indented two spaces per level; text nodes and
// attributes are preserved as-is.
@Test
public void testFormatXmlComplex() {
  assertThat(
      XPathUtil.formatXml(
          "<one foo='bar'><two/><three><four p=\"1\"/></three>...</one>"),
      CoreMatchers.is(String.join(lineSeparator,
          "<?xml version=\"1.0\" encoding=\"UTF-8\"?><one foo=\"bar\">",
          "  <two/>",
          "  <three>",
          "    <four p=\"1\"/>",
          "  </three>...</one>",
          "")));
}
/**
 * Configures the multicast socket used for Hazelcast member discovery: binds
 * it to the configured port, applies TTL/loopback/interface settings (best
 * effort), sizes the buffers, joins the multicast group and sets a receive
 * timeout. The multicast group may be overridden via system property.
 */
protected static void configureMulticastSocket(MulticastSocket multicastSocket, Address bindAddress,
                                               HazelcastProperties hzProperties, MulticastConfig multicastConfig,
                                               ILogger logger)
        throws SocketException, IOException, UnknownHostException {
    multicastSocket.setReuseAddress(true);
    // bind to receive interface
    multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
    multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
    try {
        boolean loopbackBind = bindAddress.getInetAddress().isLoopbackAddress();
        Boolean loopbackModeEnabled = multicastConfig.getLoopbackModeEnabled();
        if (loopbackModeEnabled != null) {
            // setting loopbackmode is just a hint - and the argument means "disable"!
            // to check the real value we call getLoopbackMode() (and again - return value means "disabled")
            multicastSocket.setLoopbackMode(!loopbackModeEnabled);
        }
        // If LoopBack mode is not enabled (i.e. getLoopbackMode return true) and bind address is a loopback one,
        // then print a warning
        if (loopbackBind && multicastSocket.getLoopbackMode()) {
            logger.warning("Hazelcast is bound to " + bindAddress.getHost() + " and loop-back mode is "
                    + "disabled. This could cause multicast auto-discovery issues "
                    + "and render it unable to work. Check your network connectivity, try to enable the "
                    + "loopback mode and/or force -Djava.net.preferIPv4Stack=true on your JVM.");
        }
        // warning: before modifying lines below, take a look at these links:
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
        // https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
        boolean callSetInterface = OS.isMac() || !loopbackBind;
        String propSetInterface = hzProperties.getString(ClusterProperty.MULTICAST_SOCKET_SET_INTERFACE);
        if (propSetInterface != null) {
            callSetInterface = Boolean.parseBoolean(propSetInterface);
        }
        if (callSetInterface) {
            multicastSocket.setInterface(bindAddress.getInetAddress());
        }
    } catch (Exception e) {
        // Best effort: interface/loopback tuning failures must not prevent startup.
        logger.warning(e);
    }
    multicastSocket.setReceiveBufferSize(SOCKET_BUFFER_SIZE);
    multicastSocket.setSendBufferSize(SOCKET_BUFFER_SIZE);
    // System property takes precedence over the configured multicast group.
    String multicastGroup = hzProperties.getString(ClusterProperty.MULTICAST_GROUP);
    if (multicastGroup == null) {
        multicastGroup = multicastConfig.getMulticastGroup();
    }
    multicastConfig.setMulticastGroup(multicastGroup);
    multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
    multicastSocket.setSoTimeout(SOCKET_TIMEOUT);
}
// With a default config the socket must be bound with the configured port/TTL
// and join the configured group; loopback mode must be left untouched.
@Test
public void testMulticastParams() throws Exception {
  Config config = createConfig(null);
  MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
  MulticastSocket multicastSocket = mock(MulticastSocket.class);
  Address address = new Address("10.0.0.2", 5701);
  HazelcastProperties hzProperties = new HazelcastProperties(config);
  MulticastService.configureMulticastSocket(multicastSocket, address, hzProperties,
      multicastConfig, mock(ILogger.class));
  verify(multicastSocket).bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
  verify(multicastSocket).setTimeToLive(multicastConfig.getMulticastTimeToLive());
  verify(multicastSocket, never()).setLoopbackMode(anyBoolean());
  verify(multicastSocket).joinGroup(InetAddress.getByName(multicastConfig.getMulticastGroup()));
}
// Intentional no-op: this implementation does not support unsetting a
// profile push id, so no event is tracked.
@Override
public void profileUnsetPushId(String pushTypeKey) {
}
// profileUnsetPushId is a no-op, so the track callback must never fire;
// any invocation fails the test.
@Test
public void profileUnsetPushId() {
  mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
    @Override
    public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
      Assert.fail();
      return false;
    }
  });
  mSensorsAPI.profileUnsetPushId("jpush_id");
}
/**
 * Creates a marshaller for {@code clazz}, applying the factory's configured
 * properties, the optional validation event handler, and the schema
 * (a null schema disables validation).
 *
 * @throws JAXBException if the context or marshaller cannot be created
 */
public Marshaller createMarshaller(Class<?> clazz) throws JAXBException {
  Marshaller marshaller = getContext(clazz).createMarshaller();
  setMarshallerProperties(marshaller);
  if (marshallerEventHandler != null) {
    marshaller.setEventHandler(marshallerEventHandler);
  }
  marshaller.setSchema(marshallerSchema);
  return marshaller;
}
// The builder's fragment flag must be propagated as JAXB_FRAGMENT on the
// created marshaller.
@Test
void buildsMarshallerWithFragmentProperty() throws Exception {
  JAXBContextFactory factory =
      new JAXBContextFactory.Builder().withMarshallerFragment(true).build();
  Marshaller marshaller = factory.createMarshaller(Object.class);
  assertThat((Boolean) marshaller.getProperty(Marshaller.JAXB_FRAGMENT)).isTrue();
}
/**
 * Waits uninterruptibly-style on the latch: an {@link InterruptedException}
 * is swallowed but the thread's interrupt status is restored so callers can
 * still observe it.
 */
public static void latchAwait(CountDownLatch latch) {
    boolean interrupted = false;
    try {
        latch.await();
    } catch (InterruptedException ignored) {
        // Remember the interruption; re-assert it below.
        interrupted = true;
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
// The timed overload must block for at least the requested duration when the
// latch is never counted down.
@Test
void testLatchAwaitForTimeout() {
  final CountDownLatch countDownLatch = new CountDownLatch(1);
  long currentTime = System.currentTimeMillis();
  ThreadUtils.latchAwait(countDownLatch, 50, TimeUnit.MILLISECONDS);
  assertTrue(System.currentTimeMillis() - currentTime >= 50);
}
// Exposes the actual negotiator type resolved by the superclass.
@Override
public String type() {
    return super.actualType;
}
// The singleton's type must resolve to the well-known type property key.
@Test
void testType() {
  String type = SdkProtocolNegotiatorBuilderSingleton.getSingleton().type();
  assertNotNull(type);
  assertEquals(SdkProtocolNegotiatorBuilderSingleton.TYPE_PROPERTY_KEY, type);
}
/**
 * Converts an Iceberg schema into its Pig {@code ResourceSchema} equivalent
 * by converting each top-level column.
 *
 * @throws IOException if a column cannot be converted
 */
public static ResourceSchema convert(Schema icebergSchema) throws IOException {
    ResourceSchema pigSchema = new ResourceSchema();
    pigSchema.setFields(convertFields(icebergSchema.columns()));
    return pigSchema;
}
// A struct nested inside a list inside a map must render as a Pig bag of
// tuples; note Pig maps drop the key type in the textual form.
@Test
public void testTupleInMap() throws IOException {
  Schema icebergSchema = new Schema(
      optional(1, "nested_list",
          MapType.ofOptional(2, 3, StringType.get(),
              ListType.ofOptional(4,
                  StructType.of(
                      required(5, "id", LongType.get()),
                      optional(6, "data", StringType.get()))))));
  ResourceSchema pigSchema = SchemaUtil.convert(icebergSchema);
  // The output should contain a nested struct within a list within a map, I think.
  assertThat(pigSchema.toString()).isEqualTo("nested_list:[{(id:long,data:chararray)}]");
}
// Iteration is deliberately unsupported on this queue implementation.
@Override
public Iterator<E> iterator() {
    throw new UnsupportedOperationException();
}
// Calling iterator() must always throw UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class)
public void iterator_whenCalled_thenUnsupportedOperationException() {
  queue.iterator();
}
/**
 * Generates a sharding condition from an IN predicate. NOT IN yields nothing.
 * Literal values (including NULL) and parameter markers are collected in
 * order; a now()-style expression is resolved to the current timestamp.
 */
@Override
public Optional<ShardingConditionValue> generate(final InExpression predicate, final Column column,
                                                 final List<Object> params, final TimestampServiceRule timestampServiceRule) {
    if (predicate.isNot()) {
        return Optional.empty();
    }
    Collection<ExpressionSegment> expressionSegments = predicate.getExpressionList();
    List<Integer> parameterMarkerIndexes = new ArrayList<>(expressionSegments.size());
    List<Comparable<?>> shardingConditionValues = new LinkedList<>();
    for (ExpressionSegment each : expressionSegments) {
        ConditionValue conditionValue = new ConditionValue(each, params);
        Optional<Comparable<?>> value = conditionValue.getValue();
        // NULL literals still participate as null entries in the value list.
        if (conditionValue.isNull()) {
            shardingConditionValues.add(null);
            conditionValue.getParameterMarkerIndex().ifPresent(parameterMarkerIndexes::add);
            continue;
        }
        if (value.isPresent()) {
            shardingConditionValues.add(value.get());
            conditionValue.getParameterMarkerIndex().ifPresent(parameterMarkerIndexes::add);
            continue;
        }
        // now() and friends: substitute the service-provided timestamp.
        if (ExpressionConditionUtils.isNowExpression(each)) {
            shardingConditionValues.add(timestampServiceRule.getTimestamp());
        }
    }
    return shardingConditionValues.isEmpty() ? Optional.empty()
            : Optional.of(new ListShardingConditionValue<>(column.getName(), column.getTableName(),
                    shardingConditionValues, parameterMarkerIndexes));
}
// NULL literals in an IN list must be preserved as null entries, in order,
// with no parameter marker indexes recorded for plain literals.
@Test
void assertNullAndCommonExpression() {
  ListExpression listExpression = new ListExpression(0, 0);
  listExpression.getItems().add(new LiteralExpressionSegment(0, 0, "test1"));
  listExpression.getItems().add(new LiteralExpressionSegment(0, 0, null));
  listExpression.getItems().add(new LiteralExpressionSegment(0, 0, null));
  listExpression.getItems().add(new LiteralExpressionSegment(0, 0, "test2"));
  InExpression inExpression = new InExpression(0, 0, null, listExpression, false);
  Optional<ShardingConditionValue> shardingConditionValue =
      generator.generate(inExpression, column, new LinkedList<>(), timestampServiceRule);
  assertTrue(shardingConditionValue.isPresent());
  assertThat(((ListShardingConditionValue) shardingConditionValue.get()).getValues(),
      is(Arrays.asList("test1", null, null, "test2")));
  assertTrue(shardingConditionValue.get().getParameterMarkerIndexes().isEmpty());
  assertThat(shardingConditionValue.get().toString(), is("tbl.id in (test1,,,test2)"));
}
/**
 * Determines whether the active processing nodes have already indexed the
 * data the given time range refers to.
 *
 * @return NONE_ACTIVE if no node updated its status recently enough;
 *     SOME_OVERLOADED if a busy node is still behind the range end;
 *     ALL_IDLE if every active node is behind the range start;
 *     SOME_UP_TO_DATE otherwise
 */
public ProcessingNodesState calculateProcessingState(TimeRange timeRange) {
    // Nodes that haven't checked in since this timestamp count as inactive.
    final DateTime updateThresholdTimestamp = clock.nowUTC().minus(updateThreshold.toMilliseconds());
    try (DBCursor<ProcessingStatusDto> statusCursor = db.find(activeNodes(updateThresholdTimestamp))) {
        if (!statusCursor.hasNext()) {
            return ProcessingNodesState.NONE_ACTIVE;
        }
        int activeNodes = 0;
        int idleNodes = 0;
        while (statusCursor.hasNext()) {
            activeNodes++;
            ProcessingStatusDto nodeProcessingStatus = statusCursor.next();
            DateTime lastIndexedMessage = nodeProcessingStatus.receiveTimes().postIndexing();
            // If node is behind and is busy, it is overloaded.
            if (lastIndexedMessage.isBefore(timeRange.getTo()) && isBusy(nodeProcessingStatus)) {
                return ProcessingNodesState.SOME_OVERLOADED;
            }
            // If a node did not index a message that is at least at the start of the time range,
            // we consider it idle.
            if (lastIndexedMessage.isBefore(timeRange.getFrom())) {
                idleNodes++;
            }
        }
        // Only if all nodes are idle, we stop the processing.
        if (activeNodes == idleNodes) {
            return ProcessingNodesState.ALL_IDLE;
        }
    }
    // If none of the above checks return, we can assume that some nodes have already indexed the given timerange.
    return ProcessingNodesState.SOME_UP_TO_DATE;
}
// A busy node that has not indexed up to the range end must be reported as
// SOME_OVERLOADED (fixture provides an overloaded process-buffer node).
@Test
@MongoDBFixtures("processing-status-overloaded-processbuffer-node.json")
public void processingStateOverloadedProcessBufferNode() {
  when(clock.nowUTC()).thenReturn(DateTime.parse("2019-01-01T04:00:00.000Z"));
  when(updateThreshold.toMilliseconds()).thenReturn(Duration.hours(1).toMilliseconds());
  TimeRange timeRange = AbsoluteRange.create("2019-01-01T02:00:00.000Z", "2019-01-01T03:00:00.000Z");
  assertThat(dbService.calculateProcessingState(timeRange)).isEqualTo(ProcessingNodesState.SOME_OVERLOADED);
}
// Static factory: wraps a servlet response (and optional request/exception)
// in the tracing adapter. Request and caught throwable may be null.
public static HttpServerResponse create(@Nullable HttpServletRequest request,
    HttpServletResponse response, @Nullable Throwable caught) {
  return new HttpServletResponseWrapper(request, response, caught);
}
// A non-String "http.route" attribute must be ignored rather than throwing:
// route() returns null.
@Test
void route_okOnBadAttribute() {
  HttpServerResponse wrapper = HttpServletResponseWrapper.create(request, response, null);
  when(request.getAttribute("http.route")).thenReturn(new Object());
  assertThat(wrapper.route()).isNull();
}
/**
 * Logs in against the configured Nacos servers to obtain an access token.
 * Skips the request while the cached token is inside its refresh window, or
 * when no username is configured (authentication disabled).
 *
 * @return true on success or when no login is required; false if every server
 *     failed or an error occurred
 */
@Override
public Boolean login(Properties properties) {
    try {
        // Cached token still fresh: nothing to do.
        if ((System.currentTimeMillis() - lastRefreshTime) < TimeUnit.SECONDS
                .toMillis(tokenTtl - tokenRefreshWindow)) {
            return true;
        }
        // No username configured -> auth disabled; treat as logged in.
        if (StringUtils.isBlank(properties.getProperty(PropertyKeyConst.USERNAME))) {
            lastRefreshTime = System.currentTimeMillis();
            return true;
        }
        for (String server : this.serverList) {
            HttpLoginProcessor httpLoginProcessor = new HttpLoginProcessor(nacosRestTemplate);
            properties.setProperty(NacosAuthLoginConstant.SERVER, server);
            LoginIdentityContext identityContext = httpLoginProcessor.getResponse(properties);
            if (identityContext != null) {
                if (identityContext.getAllKey().contains(NacosAuthLoginConstant.ACCESSTOKEN)) {
                    // Refresh window is a tenth of the TTL.
                    tokenTtl = Long.parseLong(identityContext.getParameter(NacosAuthLoginConstant.TOKENTTL));
                    tokenRefreshWindow = tokenTtl / 10;
                    lastRefreshTime = System.currentTimeMillis();
                    LoginIdentityContext newCtx = new LoginIdentityContext();
                    newCtx.setParameter(NacosAuthLoginConstant.ACCESSTOKEN,
                            identityContext.getParameter(NacosAuthLoginConstant.ACCESSTOKEN));
                    this.loginIdentityContext = newCtx;
                }
                // NOTE(review): returns true for any non-null response, even one
                // without an access token — confirm this is intentional.
                return true;
            }
        }
    } catch (Throwable throwable) {
        SECURITY_LOGGER.warn("[SecurityProxy] login failed, error: ", throwable);
        return false;
    }
    return false;
}
// A 200 response carrying an accessToken must make login() return true.
@Test
void testLoginSuccess() throws Exception {
  // given
  NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
  HttpRestResult<Object> result = new HttpRestResult<>();
  result.setData("{\"accessToken\":\"ttttttttttttttttt\",\"tokenTtl\":1000}");
  result.setCode(200);
  when(nacosRestTemplate.postForm(any(), (Header) any(), any(), any(), any())).thenReturn(result);
  Properties properties = new Properties();
  properties.setProperty(PropertyKeyConst.USERNAME, "aaa");
  properties.setProperty(PropertyKeyConst.PASSWORD, "123456");
  List<String> serverList = new ArrayList<>();
  serverList.add("localhost");
  NacosClientAuthServiceImpl nacosClientAuthService = new NacosClientAuthServiceImpl();
  nacosClientAuthService.setServerList(serverList);
  nacosClientAuthService.setNacosRestTemplate(nacosRestTemplate);
  // when
  boolean ret = nacosClientAuthService.login(properties);
  // then
  assertTrue(ret);
}
/**
 * Looks up the native cache matching the given content-pack entity.
 * Only V1 entities are supported.
 *
 * @throws IllegalArgumentException for any non-V1 entity version
 */
@Override
public Optional<NativeEntity<CacheDto>> findExisting(Entity entity, Map<String, ValueReference> parameters) {
    // Guard clause: reject unsupported versions before dispatching.
    if (!(entity instanceof EntityV1)) {
        throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
    }
    return findExisting((EntityV1) entity, parameters);
}
// A V1 entity whose name matches a fixture cache must resolve to that native
// cache, with descriptor and fields intact.
@Test
@MongoDBFixtures("LookupCacheFacadeTest.json")
public void findExisting() {
  final Entity entity = EntityV1.builder()
      .id(ModelId.of("1"))
      .type(ModelTypes.LOOKUP_CACHE_V1)
      .data(objectMapper.convertValue(LookupCacheEntity.create(
          ValueReference.of(DefaultEntityScope.NAME),
          ValueReference.of("no-op-cache"),
          ValueReference.of("No-op cache"),
          ValueReference.of("No-op cache"),
          ReferenceMapUtils.toReferenceMap(ImmutableMap.of("type", "none"))
      ), JsonNode.class))
      .build();
  final NativeEntity<CacheDto> existingCache = facade.findExisting(entity, Collections.emptyMap())
      .orElseThrow(AssertionError::new);
  final NativeEntityDescriptor descriptor = existingCache.descriptor();
  final CacheDto cacheDto = existingCache.entity();
  assertThat(descriptor.id()).isEqualTo(ModelId.of("5adf24b24b900a0fdb4e52dd"));
  assertThat(descriptor.type()).isEqualTo(ModelTypes.LOOKUP_CACHE_V1);
  assertThat(cacheDto.id()).isEqualTo("5adf24b24b900a0fdb4e52dd");
  assertThat(cacheDto.name()).isEqualTo("no-op-cache");
  assertThat(cacheDto.title()).isEqualTo("No-op cache");
  assertThat(cacheDto.description()).isEqualTo("No-op cache");
  assertThat(cacheDto.config().type()).isEqualTo("none");
}
// Delegates to the embedded event loop: stops its virtual clock so scheduled
// tasks no longer advance with wall time until unfrozen.
public void freezeTime() {
    embeddedEventLoop().freezeTime();
}
@Test @Timeout(30) // generous timeout, just make sure we don't actually wait for the full 10 mins... void testFreezeTime() { EmbeddedChannel channel = new EmbeddedChannel(); Runnable runnable = new Runnable() { @Override public void run() { } }; channel.freezeTime(); // this future will complete after 10min ScheduledFuture<?> future10 = channel.eventLoop().schedule(runnable, 10, TimeUnit.MINUTES); // this future will complete after 10min + 1ns ScheduledFuture<?> future101 = channel.eventLoop().schedule(runnable, TimeUnit.MINUTES.toNanos(10) + 1, TimeUnit.NANOSECONDS); // this future will complete after 20min ScheduledFuture<?> future20 = channel.eventLoop().schedule(runnable, 20, TimeUnit.MINUTES); channel.runPendingTasks(); assertFalse(future10.isDone()); assertFalse(future101.isDone()); assertFalse(future20.isDone()); channel.advanceTimeBy(10, TimeUnit.MINUTES); channel.runPendingTasks(); assertTrue(future10.isDone()); assertFalse(future101.isDone()); assertFalse(future20.isDone()); channel.unfreezeTime(); channel.runPendingTasks(); assertTrue(future101.isDone()); assertFalse(future20.isDone()); }
/**
 * Parses ls-style options into the corresponding flags, defaults to the
 * current directory when no paths are given, then initialises the ordering
 * comparator from the chosen sort flags.
 */
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
      OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN, OPTION_HIDENONPRINTABLE,
      OPTION_RECURSIVE, OPTION_REVERSE, OPTION_MTIME, OPTION_SIZE, OPTION_ATIME,
      OPTION_ECPOLICY);
  cf.parse(args);
  pathOnly = cf.getOpt(OPTION_PATHONLY);
  dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
  // Recursion only applies when directories are expanded at all (-d disables both).
  setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
  humanReadable = cf.getOpt(OPTION_HUMAN);
  hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
  orderReverse = cf.getOpt(OPTION_REVERSE);
  orderTime = cf.getOpt(OPTION_MTIME);
  // Time ordering takes precedence over size ordering when both are given.
  orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
  useAtime = cf.getOpt(OPTION_ATIME);
  displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
  if (args.isEmpty()) args.add(Path.CUR_DIR);
  initialiseOrderComparator();
}
// With -r, directory contents must be listed in reverse lexicographic order;
// verified strictly via InOrder on the mocked output stream.
@Test
public void processPathDirOrderDefaultReverse() throws IOException {
  TestFile testfile01 = new TestFile("testDirectory", "testFile01");
  TestFile testfile02 = new TestFile("testDirectory", "testFile02");
  TestFile testfile03 = new TestFile("testDirectory", "testFile03");
  TestFile testfile04 = new TestFile("testDirectory", "testFile04");
  TestFile testfile05 = new TestFile("testDirectory", "testFile05");
  TestFile testfile06 = new TestFile("testDirectory", "testFile06");
  TestFile testDir = new TestFile("", "testDirectory");
  testDir.setIsDir(true);
  // add contents in non-lexigraphic order to show they get sorted
  testDir.addContents(testfile01, testfile03, testfile05, testfile02, testfile04, testfile06);
  LinkedList<PathData> pathData = new LinkedList<PathData>();
  pathData.add(testDir.getPathData());
  PrintStream out = mock(PrintStream.class);
  Ls ls = new Ls();
  ls.out = out;
  LinkedList<String> options = new LinkedList<String>();
  options.add("-r");
  ls.processOptions(options);
  String lineFormat = TestFile.computeLineFormat(pathData);
  ls.processArguments(pathData);
  InOrder inOrder = inOrder(out);
  inOrder.verify(out).println("Found 6 items");
  inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
  inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
  inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
  inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
  inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
  inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
  verifyNoMoreInteractions(out);
}
// Static factory: builds a StreamDecoder around the given iterator-producing
// decoder, with no delegate decoder for non-stream types.
public static StreamDecoder create(Decoder iteratorDecoder) {
  return new StreamDecoder(iteratorDecoder, null);
}
// A line-based iterator decoder wrapped by StreamDecoder must stream the
// response body lazily; closing the Stream releases the response.
@Test
void simpleDefaultStreamTest() {
  MockWebServer server = new MockWebServer();
  server.enqueue(new MockResponse().setBody("foo\nbar"));
  StreamInterface api = Feign.builder()
      .decoder(StreamDecoder.create((r, t) -> {
        BufferedReader bufferedReader = new BufferedReader(r.body().asReader(UTF_8));
        return bufferedReader.lines().iterator();
      }))
      .doNotCloseAfterDecode()
      .target(StreamInterface.class, server.url("/").toString());
  try (Stream<String> stream = api.get()) {
    assertThat(stream.collect(Collectors.toList())).isEqualTo(Arrays.asList("foo", "bar"));
  }
}
/**
 * Compares two version strings of the form {@code x.y.z} or {@code x.y.z-label}.
 * Segments are compared numerically, so {@code 1.10.0 > 1.9.0}; a trailing
 * {@code -label} on the patch segment is ignored.
 *
 * @return a negative int, zero, or a positive int as versionA is less than,
 *     equal to, or greater than versionB
 * @throws IllegalArgumentException if either version does not have exactly
 *     three dot-separated segments
 * @throws NumberFormatException if a segment is not numeric
 */
public static int compareVersion(final String versionA, final String versionB) {
    final String[] sA = versionA.split("\\.");
    final String[] sB = versionB.split("\\.");
    int expectSize = 3;
    if (sA.length != expectSize || sB.length != expectSize) {
        throw new IllegalArgumentException("version must be like x.y.z(-beta)");
    }
    // Compare numerically rather than with a string comparator so multi-digit
    // segments order correctly (lexicographically "10" < "9").
    int first = Integer.compare(Integer.parseInt(sA[0]), Integer.parseInt(sB[0]));
    if (first != 0) {
        return first;
    }
    int second = Integer.compare(Integer.parseInt(sA[1]), Integer.parseInt(sB[1]));
    if (second != 0) {
        return second;
    }
    // Strip an optional "-label" suffix from the patch segment before comparing.
    return Integer.compare(Integer.parseInt(sA[2].split("-")[0]), Integer.parseInt(sB[2].split("-")[0]));
}
// A "-beta" suffix on the patch segment must not affect ordering:
// 1.2.0-beta is still less than 1.2.1.
@Test
void testVersionCompareLtWithChar() {
  assertTrue(VersionUtils.compareVersion("1.2.0-beta", "1.2.1") < 0);
}
// Convenience overload: substitute ${...} variables using a single property
// container, with no secondary fallback container.
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
    return substVars(val, pc1, null);
}
// A diamond-shaped (non-circular) reference graph must resolve fully, with
// shared variables expanded at each occurrence.
@Test
public void nonCircularGraphShouldWork() throws ScanException {
  context.putProperty("A", "${B} and ${C}");
  context.putProperty("B", "${B1}");
  context.putProperty("B1", "B1-value");
  context.putProperty("C", "${C1} and ${B}");
  context.putProperty("C1", "C1-value");
  String result = OptionHelper.substVars("${A}", context);
  assertEquals("B1-value and C1-value and B1-value", result);
}
// Delegates table listing to the JDBC client; an empty schemaName means
// all schemas.
@Override
public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> schemaName) {
    return jdbcClient.getTableNames(session, JdbcIdentity.from(session), schemaName);
}
// listTables must cover all schemas when no schema is given, filter by schema
// when one is, and return empty for an unknown schema.
@Test
public void testListTables() {
  // all schemas
  assertEquals(ImmutableSet.copyOf(metadata.listTables(SESSION, Optional.empty())), ImmutableSet.of(
      new SchemaTableName("example", "numbers"),
      new SchemaTableName("example", "view_source"),
      new SchemaTableName("example", "view"),
      new SchemaTableName("tpch", "orders"),
      new SchemaTableName("tpch", "lineitem"),
      new SchemaTableName("exa_ple", "table_with_float_col"),
      new SchemaTableName("exa_ple", "num_ers")));
  // specific schema
  assertEquals(ImmutableSet.copyOf(metadata.listTables(SESSION, Optional.of("example"))), ImmutableSet.of(
      new SchemaTableName("example", "numbers"),
      new SchemaTableName("example", "view_source"),
      new SchemaTableName("example", "view")));
  assertEquals(ImmutableSet.copyOf(metadata.listTables(SESSION, Optional.of("tpch"))), ImmutableSet.of(
      new SchemaTableName("tpch", "orders"),
      new SchemaTableName("tpch", "lineitem")));
  assertEquals(ImmutableSet.copyOf(metadata.listTables(SESSION, Optional.of("exa_ple"))), ImmutableSet.of(
      new SchemaTableName("exa_ple", "num_ers"),
      new SchemaTableName("exa_ple", "table_with_float_col")));
  // unknown schema
  assertEquals(ImmutableSet.copyOf(metadata.listTables(SESSION, Optional.of("unknown"))), ImmutableSet.of());
}