focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Performs the theft sequence: pick a target, log it, confuse it, then steal the item.
 * Side effects only — progress is reported via LOGGER.
 */
public final void steal() {
    var target = pickTarget();
    // Parameterized logging avoids string concatenation when INFO is disabled.
    LOGGER.info("The target has been chosen as {}.", target);
    confuseTarget(target);
    stealTheItem(target);
}
/**
 * Verifies that steal() emits exactly three log messages (captured by a test
 * appender): the chosen target, the confuse step and the steal step.
 */
@Test
void testSteal() {
    this.method.steal();
    assertTrue(appender.logContains(this.expectedTargetResult));
    assertTrue(appender.logContains(this.expectedConfuseMethod));
    assertTrue(appender.logContains(this.expectedStealMethod));
    // No extra log lines beyond the three expected ones.
    assertEquals(3, appender.getLogSize());
}
/**
 * Static factory for a Cassandra {@code Read} transform with all options unset;
 * configure it via the fluent with* methods before applying it to a pipeline.
 */
public static <T> Read<T> read() {
    return new AutoValue_CassandraIO_Read.Builder<T>().build();
}
/**
 * Reads with an explicit CQL query selecting only person_id and the write time
 * of person_name: exactly one row matches, the unselected name field stays null
 * and the writetime value is populated (> 0).
 */
@Test
public void testReadWithQuery() throws Exception {
    String query =
        String.format(
            "select person_id, writetime(person_name) from %s.%s where person_id=10 AND person_department='logic'",
            CASSANDRA_KEYSPACE, CASSANDRA_TABLE);

    PCollection<Scientist> output =
        pipeline.apply(
            CassandraIO.<Scientist>read()
                .withHosts(Collections.singletonList(CASSANDRA_HOST))
                .withPort(cassandraPort)
                .withKeyspace(CASSANDRA_KEYSPACE)
                .withTable(CASSANDRA_TABLE)
                .withMinNumberOfSplits(20)
                .withQuery(query)
                .withCoder(SerializableCoder.of(Scientist.class))
                .withEntity(Scientist.class));

    PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(1L);
    PAssert.that(output)
        .satisfies(
            input -> {
              for (Scientist sci : input) {
                // name was not selected by the query, so it must remain null.
                assertNull(sci.name);
                // writetime(person_name) must be present and positive.
                assertTrue(sci.nameTs != null && sci.nameTs > 0);
              }
              return null;
            });
    pipeline.run();
}
/**
 * Serializes this job by writing its Gson JSON form as a string to the output.
 * Serialization is done against the AlterJobV2 base class so subtype fields are
 * handled by the registered Gson configuration.
 */
@Override
public void write(DataOutput out) throws IOException {
    String json = GsonUtils.GSON.toJson(this, AlterJobV2.class);
    // Text.writeString presumably length-prefixes the string — TODO confirm.
    Text.writeString(out, json);
}
@Test public void testSerializeOfRollupJob() throws IOException, AnalysisException { Config.enable_materialized_view = true; // prepare file String fileName = "./RollupJobV2Test"; File file = new File(fileName); file.createNewFile(); DataOutputStream out = new DataOutputStream(new FileOutputStream(file)); short keysCount = 1; List<Column> columns = Lists.newArrayList(); String mvColumnName = MVUtils.MATERIALIZED_VIEW_NAME_PREFIX + "bitmap_union_" + "c1"; Column column = new Column(mvColumnName, Type.BITMAP, false, AggregateType.BITMAP_UNION, false, new ColumnDef.DefaultValueDef(true, new StringLiteral("1")), ""); columns.add(column); RollupJobV2 rollupJobV2 = new RollupJobV2(1, 1, 1, "test", 1, 1, 1, "test", "rollup", 0, columns, null, 1, 1, KeysType.AGG_KEYS, keysCount, new OriginStatement("create materialized view rollup as select bitmap_union(to_bitmap(c1)) from test", 0), "", false); // write rollup job rollupJobV2.write(out); out.flush(); out.close(); // read objects from file DataInputStream in = new DataInputStream(new FileInputStream(file)); RollupJobV2 result = (RollupJobV2) AlterJobV2.read(in); List<Column> resultColumns = Deencapsulation.getField(result, "rollupSchema"); assertEquals(1, resultColumns.size()); Column resultColumn1 = resultColumns.get(0); assertEquals(mvColumnName, resultColumn1.getName()); Assert.assertTrue(resultColumn1.getDefineExpr() instanceof FunctionCallExpr); FunctionCallExpr resultFunctionCall = (FunctionCallExpr) resultColumn1.getDefineExpr(); assertEquals("to_bitmap", resultFunctionCall.getFnName().getFunction()); }
/**
 * Executes a configured KSQL plan. KsqlStatementExceptions are rethrown
 * untouched; any other KsqlException is wrapped so the failing statement text
 * is attached for the caller.
 */
@Override
public ExecuteResult execute(
    final ServiceContext serviceContext,
    final ConfiguredKsqlPlan plan,
    final boolean restoreInProgress) {
  try {
    final ExecuteResult result = EngineExecutor
        .create(primaryContext, serviceContext, plan.getConfig())
        .execute(plan.getPlan(), restoreInProgress);
    return result;
  } catch (final KsqlStatementException e) {
    // Already carries statement text — do not re-wrap.
    throw e;
  } catch (final KsqlException e) {
    // add the statement text to the KsqlException
    // NOTE(review): the message is passed twice — presumably for the logged and
    // unlogged message variants of KsqlStatementException; confirm.
    throw new KsqlStatementException(
        e.getMessage(),
        e.getMessage(),
        plan.getPlan().getStatementText(),
        e.getCause()
    );
  }
}
/**
 * Creating an Avro-valued stream that declares no columns (and has no schema to
 * infer from) must fail with a statement error naming the offending statement.
 */
@Test
public void shouldThrowIfSchemaNotPresent() {
    // Given:
    setupKsqlEngineWithSharedRuntimeEnabled();
    givenTopicsExist("bar");

    // When:
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> execute(
            serviceContext,
            ksqlEngine,
            "create stream bar with (key_format='kafka', value_format='avro', kafka_topic='bar');",
            ksqlConfig,
            emptyMap()
        )
    );

    // Then: both the message and the recorded statement text are checked.
    assertThat(e, rawMessage(
        containsString(
            "The statement does not define any columns.")));
    assertThat(e, statementText(
        is(
            "create stream bar with (key_format='kafka', value_format='avro', kafka_topic='bar');")));
}
/**
 * Returns scan statistics for the given volume, or null when no scanner is
 * registered under that volume id. Synchronized to guard the scanners map.
 */
@VisibleForTesting
synchronized VolumeScanner.Statistics getVolumeStats(String volumeId) {
    VolumeScanner scanner = scanners.get(volumeId);
    if (scanner == null) {
        return null;
    }
    return scanner.getStatistics();
}
/**
 * With three block pools, the volume scanner must complete at least three scans
 * (one per pool, tracked via scansSinceRestart) and report the expected file
 * and byte counts.
 */
@Test(timeout=120000)
public void testMultipleBlockPoolScanning() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
  conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
      TestScanResultHandler.class.getName());
  final TestContext ctx = new TestContext(conf, 3);

  // We scan 5 bytes per file (1 byte in file, 4 bytes of checksum)
  final int BYTES_SCANNED_PER_FILE = 5;
  int TOTAL_FILES = 16;
  ctx.createFiles(0, TOTAL_FILES, 1);

  // start scanning
  final TestScanResultHandler.Info info =
      TestScanResultHandler.getInfo(ctx.volumes.get(0));
  synchronized (info) {
    info.shouldRun = true;
    info.notify();
  }

  // Wait for all the block pools to be scanned.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      synchronized (info) {
        Statistics stats = ctx.blockScanner.getVolumeStats(
            ctx.volumes.get(0).getStorageID());
        if (stats.scansSinceRestart < 3) {
          LOG.info("Waiting for scansSinceRestart to reach 3 (it is {})",
              stats.scansSinceRestart);
          return false;
        }
        if (!stats.eof) {
          LOG.info("Waiting for eof.");
          return false;
        }
        return true;
      }
    }
  }, 3, 30000);

  Statistics stats = ctx.blockScanner.getVolumeStats(
      ctx.volumes.get(0).getStorageID());
  assertEquals(TOTAL_FILES, stats.blocksScannedSinceRestart);
  assertEquals(BYTES_SCANNED_PER_FILE * TOTAL_FILES, stats.bytesScannedInPastHour);
  ctx.close();
}
/**
 * Creates a Splunk HEC endpoint. The remaining part of the URI must not contain
 * extra '/'-separated path segments; it is used verbatim as the Splunk URL.
 */
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    // split("/") drops trailing empty strings, so only a real extra segment triggers this.
    if (remaining.split("/").length > 1) {
        throw new IllegalArgumentException("Invalid URI: " + URISupport.sanitizeUri(uri));
    }
    SplunkHECEndpoint answer = new SplunkHECEndpoint(uri, this, new SplunkHECConfiguration());
    setProperties(answer, parameters);
    answer.setSplunkURL(remaining);
    return answer;
}
/**
 * Endpoint creation succeeds without a token, but init() must reject the
 * configuration with a clear message.
 */
@Test
public void testMissingToken() throws Exception {
    Endpoint endpoint = component.createEndpoint("splunk-hec:localhost:18808");
    Exception e = assertThrows(IllegalArgumentException.class, endpoint::init);
    assertEquals("A token must be defined", e.getMessage());
}
/**
 * Equality is intentionally delegated to hash codes: any two CacheReference
 * instances with the same hash compare equal, regardless of their concrete
 * reference type (see the matching cross-type assertions in the tests).
 */
@Override
public boolean equals(final Object other) {
    // instanceof is false for null, so no separate null check is needed.
    return other instanceof CacheReference && this.hashCode() == other.hashCode();
}
/**
 * Cache references built for the same path must be equal — to each other and,
 * via hashCode/toString/equals, to the native NSObject-based reference.
 */
@Test
public void testEquals() {
    final Path f = new Path("/f", EnumSet.of(Path.Type.file));
    assertEquals(new TransferItemCache.TransferItemCacheReference(new TransferItem(f)),
            new TransferItemCache.TransferItemCacheReference(new TransferItem(f)));
    assertEquals(new TransferItemCache.TransferItemCacheReference(new TransferItem(f)).hashCode(),
            new NSObjectPathReference(NSObjectTransferItemReference.get(f)).hashCode());
    assertEquals(new TransferItemCache.TransferItemCacheReference(new TransferItem(f)).toString(),
            new NSObjectPathReference(NSObjectTransferItemReference.get(f)).toString());
    // Cross-type equality relies on the hashCode-based equals of CacheReference.
    assertEquals(new TransferItemCache.TransferItemCacheReference(new TransferItem(f)),
            new NSObjectPathReference(NSObjectTransferItemReference.get(f)));
}
/**
 * A LIKE predicate is index-eligible only when an ordered index matches the
 * attribute and the pattern can serve as an index prefix (literal prefix
 * followed by a trailing wildcard).
 */
@Override
public boolean isIndexed(QueryContext queryContext) {
    Index index = queryContext.matchIndex(attributeName, QueryContext.IndexMatchHint.PREFER_ORDERED);
    return index != null && index.isOrdered() && expressionCanBeUsedAsIndexPrefix();
}
/**
 * Even with a sorted index on "this", a LIKE pattern whose % wildcard is not
 * the final character cannot be served by an index prefix scan.
 */
@Test
public void likePredicateIsNotIndexed_whenPercentWildcardIsNotTheLastSymbol() {
    QueryContext queryContext = mock(QueryContext.class);
    when(queryContext.matchIndex("this", QueryContext.IndexMatchHint.PREFER_ORDERED))
            .thenReturn(createIndex(IndexType.SORTED));

    assertFalse(new LikePredicate("this", "sub%str").isIndexed(queryContext));
    // A trailing space after % also disqualifies the pattern.
    assertFalse(new LikePredicate("this", "sub% ").isIndexed(queryContext));
}
/**
 * Maps a traffic treatment to a PiAction by dispatching on which control-table
 * group the given table id belongs to.
 *
 * @throws PiInterpreterException if the table is not supported for treatment mapping
 */
@Override
public PiAction mapTreatment(TrafficTreatment treatment, PiTableId piTableId)
        throws PiInterpreterException {
    if (FORWARDING_CTRL_TBLS.contains(piTableId)) {
        return treatmentInterpreter.mapForwardingTreatment(treatment, piTableId);
    } else if (PRE_NEXT_CTRL_TBLS.contains(piTableId)) {
        return treatmentInterpreter.mapPreNextTreatment(treatment, piTableId);
    } else if (ACL_CTRL_TBLS.contains(piTableId)) {
        return treatmentInterpreter.mapAclTreatment(treatment, piTableId);
    } else if (NEXT_CTRL_TBLS.contains(piTableId)) {
        return treatmentInterpreter.mapNextTreatment(treatment, piTableId);
    } else if (E_NEXT_CTRL_TBLS.contains(piTableId)) {
        return treatmentInterpreter.mapEgressNextTreatment(treatment, piTableId);
    } else {
        throw new PiInterpreterException(format(
                "Treatment mapping not supported for table '%s'", piTableId));
    }
}
/**
 * A set-VLAN treatment on the pre-next "next_vlan" table must map to the
 * pre-next set_vlan PiAction carrying the VLAN id parameter.
 */
@Test
public void testNextVlanTreatment() throws Exception {
    TrafficTreatment treatment = DefaultTrafficTreatment.builder()
            .setVlanId(VLAN_100)
            .build();
    PiAction mappedAction = interpreter.mapTreatment(
            treatment, FabricConstants.FABRIC_INGRESS_PRE_NEXT_NEXT_VLAN);
    PiActionParam vlanParam = new PiActionParam(
            FabricConstants.VLAN_ID, VLAN_100.toShort());
    PiAction expectedAction = PiAction.builder()
            .withId(FabricConstants.FABRIC_INGRESS_PRE_NEXT_SET_VLAN)
            .withParameter(vlanParam)
            .build();
    assertEquals(expectedAction, mappedAction);
}
/**
 * Instantiates a plugin with the thread context classloader temporarily swapped
 * to the plugin's own classloader for the duration of construction.
 */
private <T> T newPlugin(Class<T> klass) {
    // KAFKA-8340: The thread classloader is used during static initialization and must be
    // set to the plugin's classloader during instantiation
    try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) {
        return Utils.newInstance(klass);
    } catch (Throwable t) {
        // Throwable (not just Exception) so linkage/static-init errors also surface.
        throw new ConnectException("Instantiation error", t);
    }
}
/**
 * A plugin whose static initializer captures the thread context classloader
 * must observe its own plugin classloader when instantiated through Plugins.
 */
@Test
public void newPluginShouldInstantiateWithPluginClassLoader() {
    Converter plugin = plugins.newPlugin(
            TestPlugin.ALIASED_STATIC_FIELD.className(),
            new AbstractConfig(new ConfigDef(), Collections.emptyMap()),
            Converter.class);

    assertPluginClassLoaderAlwaysActive(plugin);
}
/**
 * CLI entry point for federation admin commands. Fails fast with usage output
 * when no arguments are given or YARN federation is disabled; otherwise
 * dispatches to the help / subcluster / policy / application handlers.
 */
@Override
public int run(String[] args) throws Exception {
  YarnConfiguration yarnConf = getConf() == null ?
      new YarnConfiguration() : new YarnConfiguration(getConf());
  boolean isFederationEnabled = yarnConf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
      YarnConfiguration.DEFAULT_FEDERATION_ENABLED);

  if (args.length < 1 || !isFederationEnabled) {
    printUsage(CMD_EMPTY);
    return EXIT_ERROR;
  }

  String cmd = args[0];
  if (CMD_HELP.equals(cmd)) {
    // "help <command>" prints that command's usage; bare "help" prints everything.
    if (args.length > 1) {
      printUsage(args[1]);
    } else {
      printHelp();
    }
    return EXIT_SUCCESS;
  } else if (CMD_SUBCLUSTER.equals(cmd)) {
    return handleSubCluster(args);
  } else if (CMD_POLICY.equals(cmd)) {
    return handlePolicy(args);
  } else if (CMD_APPLICATION.equals(cmd)) {
    return handleApplication(args);
  } else {
    // Unknown command still exits successfully after printing help.
    System.out.println("No related commands found.");
    printHelp();
  }
  return EXIT_SUCCESS;
}
/**
 * Deleting federation policies by queue via "-policy -d --queue root.a" must
 * succeed (exit code 0).
 *
 * Fixes over the original: System.out is restored in a finally block (it was
 * previously left redirected, polluting later tests), and the pointless
 * println of the empty capture buffer is removed.
 */
@Test
public void testDeleteFederationPoliciesByQueues() throws Exception {
  PrintStream oldOutPrintStream = System.out;
  ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
  System.setOut(new PrintStream(dataOut));
  try {
    String[] args = {"-policy", "-d", "--queue", "root.a"};
    assertEquals(0, rmAdminCLI.run(args));
  } finally {
    // Always restore the real stdout so other tests see their own output.
    System.setOut(oldOutPrintStream);
  }
}
/**
 * Resolves the Avro codec factory for a job's output. Returns null when output
 * compression is disabled. When no Avro codec is configured, falls back to the
 * Hadoop compression codec class (translated to its Avro equivalent) or,
 * failing that, to deflate.
 */
static CodecFactory getCodecFactory(JobConf job) {
  CodecFactory factory = null;

  if (FileOutputFormat.getCompressOutput(job)) {
    int deflateLevel = job.getInt(DEFLATE_LEVEL_KEY, DEFAULT_DEFLATE_LEVEL);
    int xzLevel = job.getInt(XZ_LEVEL_KEY, DEFAULT_XZ_LEVEL);
    int zstdLevel = job.getInt(ZSTD_LEVEL_KEY, DEFAULT_ZSTANDARD_LEVEL);
    boolean zstdBufferPool = job.getBoolean(ZSTD_BUFFERPOOL_KEY, DEFAULT_ZSTANDARD_BUFFERPOOL);
    String codecName = job.get(AvroJob.OUTPUT_CODEC);

    if (codecName == null) {
      // No Avro codec set: try to map the Hadoop compression codec class.
      String codecClassName = job.get("mapred.output.compression.codec", null);
      String avroCodecName = HadoopCodecFactory.getAvroCodecName(codecClassName);
      if (codecClassName != null && avroCodecName != null) {
        factory = HadoopCodecFactory.fromHadoopString(codecClassName);
        // Record the resolved Avro codec name back into the job config.
        job.set(AvroJob.OUTPUT_CODEC, avroCodecName);
        return factory;
      } else {
        return CodecFactory.deflateCodec(deflateLevel);
      }
    } else {
      // Explicit Avro codec name: honor the per-codec tuning levels.
      if (codecName.equals(DEFLATE_CODEC)) {
        factory = CodecFactory.deflateCodec(deflateLevel);
      } else if (codecName.equals(XZ_CODEC)) {
        factory = CodecFactory.xzCodec(xzLevel);
      } else if (codecName.equals(ZSTANDARD_CODEC)) {
        factory = CodecFactory.zstandardCodec(zstdLevel, false, zstdBufferPool);
      } else {
        factory = CodecFactory.fromString(codecName);
      }
    }
  }

  return factory;
}
/**
 * When no Avro codec is set but a Hadoop compression codec class is, the Hadoop
 * class name must be translated to the equivalent Avro (snappy) codec factory.
 */
@Test
void snappyCodecUsingHadoopClass() {
    CodecFactory avroSnappyCodec = CodecFactory.fromString("snappy");
    JobConf job = new JobConf();
    job.set("mapred.output.compress", "true");
    job.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    CodecFactory factory = AvroOutputFormat.getCodecFactory(job);
    assertNotNull(factory);
    // Compare by class: factory instances differ but must be the same codec type.
    assertEquals(factory.getClass(), avroSnappyCodec.getClass());
}
/**
 * Registers (or deletes) a recurring job for a @Recurring-annotated method.
 * Exactly one of cron/interval must be provided; the disabled marker value
 * deletes the existing job by id instead of scheduling it.
 */
public void schedule(ExecutableMethod<?, ?> method) {
    if (hasParametersOutsideOfJobContext(method.getTargetMethod())) {
        throw new IllegalStateException("Methods annotated with " + Recurring.class.getName() + " can only have zero parameters or a single parameter of type JobContext.");
    }

    String id = getId(method);
    String cron = getCron(method);
    String interval = getInterval(method);

    // Exactly one schedule source is allowed.
    if (StringUtils.isNullOrEmpty(cron) && StringUtils.isNullOrEmpty(interval))
        throw new IllegalArgumentException("Either cron or interval attribute is required.");
    if (isNotNullOrEmpty(cron) && isNotNullOrEmpty(interval))
        throw new IllegalArgumentException("Both cron and interval attribute provided. Only one is allowed.");

    if (Recurring.RECURRING_JOB_DISABLED.equals(cron) || Recurring.RECURRING_JOB_DISABLED.equals(interval)) {
        if (id == null) {
            // Cannot delete without an id — warn instead of failing startup.
            LOGGER.warn("You are trying to disable a recurring job using placeholders but did not define an id.");
        } else {
            jobScheduler.deleteRecurringJob(id);
        }
    } else {
        JobDetails jobDetails = getJobDetails(method);
        ZoneId zoneId = getZoneId(method);
        if (isNotNullOrEmpty(cron)) {
            jobScheduler.scheduleRecurrently(id, jobDetails, CronExpression.create(cron), zoneId);
        } else {
            jobScheduler.scheduleRecurrently(id, jobDetails, new Interval(interval), zoneId);
        }
    }
}
/**
 * A @Recurring method whose cron is the disabled marker ("-") and which has an
 * id must cause the existing recurring job to be deleted, not scheduled.
 */
@Test
void beansWithMethodsAnnotatedWithDisabledRecurringCronAnnotationWillAutomaticallyBeDeleted() {
    final ExecutableMethod executableMethod = mock(ExecutableMethod.class);
    final Method method = getRequiredMethod(MyServiceWithRecurringJob.class, "myRecurringMethod");
    when(executableMethod.getTargetMethod()).thenReturn(method);
    when(executableMethod.stringValue(Recurring.class, "id")).thenReturn(Optional.of("my-recurring-job"));
    // "-" is the RECURRING_JOB_DISABLED marker value.
    when(executableMethod.stringValue(Recurring.class, "cron")).thenReturn(Optional.of("-"));
    when(executableMethod.stringValue(Recurring.class, "interval")).thenReturn(Optional.empty());

    jobRunrRecurringJobScheduler.schedule(executableMethod);

    verify(jobScheduler).deleteRecurringJob("my-recurring-job");
}
/**
 * Runs a single pickle: builds a scenario-scoped type registry and glue,
 * creates the test case and executes it on the event bus. Scenario-scoped glue
 * and backend worlds are always torn down, even if the run throws.
 */
public void runPickle(Pickle pickle) {
    try {
        StepTypeRegistry stepTypeRegistry = createTypeRegistryForPickle(pickle);
        snippetGenerators = createSnippetGeneratorsForPickle(stepTypeRegistry);

        // Java8 step definitions will be added to the glue here
        buildBackendWorlds();

        glue.prepareGlue(stepTypeRegistry);

        TestCase testCase = createTestCaseForPickle(pickle);
        testCase.run(bus);
    } finally {
        glue.removeScenarioScopedGlue();
        disposeBackendWorlds();
    }
}
/**
 * Even when a before-hook fails, the remaining before-hooks and the after-hooks
 * must still execute, in registration order.
 */
@Test
void hooks_execute_also_after_failure() {
    HookDefinition beforeHook = createHook();
    HookDefinition afterHook = createHook();
    HookDefinition failingBeforeHook = createHook();
    doThrow(new RuntimeException("boom")).when(failingBeforeHook).execute(any(TestCaseState.class));

    TestRunnerSupplier runnerSupplier = new TestRunnerSupplier(bus, runtimeOptions) {
        @Override
        public void loadGlue(Glue glue, List<URI> gluePaths) {
            // The failing hook is registered first so its failure precedes the others.
            glue.addBeforeHook(failingBeforeHook);
            glue.addBeforeHook(beforeHook);
            glue.addAfterHook(afterHook);
        }
    };
    runnerSupplier.get().runPickle(createPicklesWithSteps());

    InOrder inOrder = inOrder(failingBeforeHook, beforeHook, afterHook);
    inOrder.verify(failingBeforeHook).execute(any(TestCaseState.class));
    inOrder.verify(beforeHook).execute(any(TestCaseState.class));
    inOrder.verify(afterHook).execute(any(TestCaseState.class));
}
/**
 * Converts a Joda-Time instant to its ThreeTen-Backport equivalent via the
 * shared epoch-millisecond representation.
 */
public static org.threeten.bp.Instant toThreetenInstant(org.joda.time.Instant jodaInstant) {
    final long epochMillis = jodaInstant.getMillis();
    return org.threeten.bp.Instant.ofEpochMilli(epochMillis);
}
/**
 * Joda → ThreeTen conversion must preserve the epoch-millisecond value exactly.
 */
@Test
public void testToThreetenInstant() {
    org.joda.time.Instant jodaInstant = org.joda.time.Instant.ofEpochMilli(1_000_000_000L);
    assertEquals(1_000_000_000L, toThreetenInstant(jodaInstant).toEpochMilli());
}
/**
 * Frees the underlying ByteBuffer of the given buffer, if any.
 * Null-safe: a null buffer is a no-op.
 */
public static void free(final DirectBuffer buffer) {
    if (buffer != null) {
        free(buffer.byteBuffer());
    }
}
/**
 * free() must be a no-op for heap (non-direct) buffers: the buffer stays usable
 * afterwards.
 */
@Test
void freeIsANoOpIfByteBufferIsNotDirect() {
    final ByteBuffer buffer = ByteBuffer.allocate(4);

    BufferUtil.free(buffer);

    // Still writable/readable — nothing was released.
    buffer.put(2, (byte)101);
    assertEquals(101, buffer.get(2));
}
/**
 * Builds the CI configuration for Bitbucket Pipelines using the commit sha from
 * the BITBUCKET_COMMIT environment variable (may be null when unset).
 */
@Override
public CiConfiguration loadConfiguration() {
    String revision = system.envVariable("BITBUCKET_COMMIT");
    return new CiConfigurationImpl(revision, getName());
}
/**
 * In a Bitbucket Pipelines pull-request build, the SCM revision must come from
 * BITBUCKET_COMMIT; the PR id has no bearing on the revision.
 */
@Test
public void configuration_of_pull_request() {
    setEnvVariable("CI", "true");
    setEnvVariable("BITBUCKET_COMMIT", "abd12fc");
    setEnvVariable("BITBUCKET_PR_ID", "1234");
    assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("abd12fc");
}
public static AddressMatcher getAddressMatcher(String address) { final AddressMatcher matcher; final int indexColon = address.indexOf(':'); final int lastIndexColon = address.lastIndexOf(':'); final int indexDot = address.indexOf('.'); final int lastIndexDot = address.lastIndexOf('.'); if (indexColon > -1 && lastIndexColon > indexColon) { if (indexDot == -1) { matcher = new Ip6AddressMatcher(); parseIpv6(matcher, address); } else { // IPv4 mapped IPv6 if (indexDot >= lastIndexDot) { throw new InvalidAddressException(address); } final int lastIndexColon2 = address.lastIndexOf(':'); final String host2 = address.substring(lastIndexColon2 + 1); matcher = new Ip4AddressMatcher(); parseIpv4(matcher, host2); } } else if (indexDot > -1 && lastIndexDot > indexDot && indexColon == -1) { // IPv4 matcher = new Ip4AddressMatcher(); parseIpv4(matcher, address); } else { throw new InvalidAddressException(address); } return matcher; }
/**
 * getAddressMatcher must classify wildcarded IPv6, plain IPv4 and IPv4-mapped
 * IPv6 addresses and normalize their textual form.
 */
@Test
public void testAddressMatcher() {
    AddressMatcher address;
    // Wildcarded IPv6 with a zone id suffix; the zone is dropped on normalize.
    address = AddressUtil.getAddressMatcher("fe80::62c5:*:fe05:480a%en0");
    assertTrue(address.isIPv6());
    assertEquals("fe80:0:0:0:62c5:*:fe05:480a", address.getAddress());

    address = AddressUtil.getAddressMatcher("192.168.1.1");
    assertTrue(address instanceof Ip4AddressMatcher);
    assertEquals("192.168.1.1", address.getAddress());

    // IPv4-mapped IPv6 is matched as IPv4.
    address = AddressUtil.getAddressMatcher("::ffff:192.0.2.128");
    assertTrue(address.isIPv4());
    assertEquals("192.0.2.128", address.getAddress());
}
/**
 * Renders this JSON value as a string.
 *
 * @param pretty true for human-readable output — the flag is negated because
 *               SlimeUtils' second argument presumably means "compact"; confirm.
 */
public String toJson(boolean pretty) {
    return SlimeUtils.toJson(inspector, !pretty);
}
/**
 * Builders seeded from existing JSON values (array and object) must produce
 * copies whose compact JSON rendering equals the original's.
 */
@Test
void create_builder_from_existing_json() {
    var jsonArray = Json.Builder.newArray()
            .add(1)
            .add(2)
            .add(3)
            .build();
    var jsonArrayCopy = Json.Builder.fromArray(jsonArray).build();
    assertEquals(jsonArray.toJson(false), jsonArrayCopy.toJson(false));

    var jsonObject = Json.Builder.newObject()
            .set("foo", "bar")
            .set("baz", Json.Builder.newArray().add("qux"))
            .build();
    var jsonObjectCopy = Json.Builder.fromObject(jsonObject).build();
    assertEquals(jsonObject.toJson(false), jsonObjectCopy.toJson(false));
}
/**
 * Builds and sends fetch requests for the currently fetchable partitions;
 * success and failure are routed to the corresponding handler methods.
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
            prepareFetchRequests(),
            this::handleFetchSuccess,
            this::handleFetchFailure
    );
}
/**
 * A SerializationException raised while deserializing fetched records must be
 * rethrown on every collect attempt, and the consumer position must not
 * advance past the bad record.
 */
@Test
public void testFetchedRecordsRaisesOnSerializationErrors() {
    // raise an exception from somewhere in the middle of the fetch response
    // so that we can verify that our position does not advance after raising
    ByteArrayDeserializer deserializer = new ByteArrayDeserializer() {
        int i = 0;
        @Override
        public byte[] deserialize(String topic, byte[] data) {
            // Every second call (the value of the first record) fails.
            if (i++ % 2 == 1) {
                // Should be blocked on the value deserialization of the first record.
                assertEquals("value-1", new String(data, StandardCharsets.UTF_8));
                throw new SerializationException();
            }
            return data;
        }
    };

    buildFetcher(deserializer, deserializer);

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));

    assertEquals(1, sendFetches());
    networkClientDelegate.poll(time.timer(0));
    for (int i = 0; i < 2; i++) {
        // The fetcher should throw a Deserialization error
        assertThrows(SerializationException.class, this::collectFetch);
        // the position should not advance since no data has been returned
        assertEquals(1, subscriptions.position(tp0).offset);
    }
}
/**
 * Data-change callback (presumably from a ZooKeeper watch): any change on the
 * watched path triggers a full refresh; the payload itself is ignored.
 */
@Override
public void handleDataChange(String dataPath, Object data) {
    refresh();
}
/**
 * A data change must trigger a refresh, i.e. a second read of the
 * table→brokers view (the first presumably happened at construction).
 */
@Test
public void testHandleDataChange() {
    _dynamicBrokerSelectorUnderTest.handleDataChange("dataPath", "data");
    verify(_mockExternalViewReader, times(2)).getTableToBrokersMap();
}
/**
 * Per-field validators for user entities: bounded lengths for names and email,
 * a required (non-empty) password, and list-typed permissions/roles.
 */
@Override
public Map<String, Validator> getValidations() {
    return ImmutableMap.<String, Validator>builder()
            .put(USERNAME, new LimitedStringValidator(1, MAX_USERNAME_LENGTH))
            .put(PASSWORD, new FilledStringValidator())
            .put(EMAIL, new LimitedStringValidator(1, MAX_EMAIL_LENGTH))
            // First/last name are optional but length-limited when present.
            .put(FIRST_NAME, new LimitedOptionalStringValidator(MAX_FIRST_LAST_NAME_LENGTH))
            .put(LAST_NAME, new LimitedOptionalStringValidator(MAX_FIRST_LAST_NAME_LENGTH))
            .put(FULL_NAME, new LimitedOptionalStringValidator(MAX_FULL_NAME_LENGTH))
            .put(PERMISSIONS, new ListValidator())
            .put(ROLES, new ListValidator(true))
            .build();
}
/**
 * First-name validation must accept values within the length limit and reject
 * clearly over-long ones (210 chars).
 */
@Test
public void testFirstNameLengthValidation() {
    user = createUserImpl(null, null, null);
    ValidationResult result = user.getValidations().get(UserImpl.FIRST_NAME)
            .validate(StringUtils.repeat("*", 10));
    assertTrue(result.passed());

    result = user.getValidations().get(UserImpl.FIRST_NAME)
            .validate(StringUtils.repeat("*", 210));
    assertFalse(result.passed());
}
/**
 * Returns a new resource whose value is this value times the multiplier.
 * Validation of the result (the tests expect negative results to be rejected
 * with IllegalArgumentException) is presumably performed by create() — confirm.
 */
public T multiply(BigDecimal multiplier) {
    return create(value.multiply(multiplier));
}
/**
 * Multiplying a resource by a negative factor must be rejected with
 * IllegalArgumentException.
 */
@Test
void testMutiplyNegative() {
    final Resource resource = new TestResource(0.3);
    final BigDecimal by = BigDecimal.valueOf(-0.2);
    assertThatThrownBy(() -> resource.multiply(by))
            .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Replication factor of this file: for a snapshot id, the value recorded in
 * that snapshot's copy of the inode; for CURRENT_STATE_ID, the value packed in
 * the header bits.
 */
public final short getFileReplication(int snapshot) {
    if (snapshot != CURRENT_STATE_ID) {
        return getSnapshotINode(snapshot).getFileReplication();
    }
    return HeaderFormat.getReplication(header);
}
/**
 * An INodeFile created with replication 3 must report that replication for the
 * current state.
 */
@Test
public void testReplication () {
    replication = 3;
    preferredBlockSize = 128*1024*1024;
    INodeFile inf = createINodeFile(replication, preferredBlockSize);
    assertEquals("True has to be returned in this case", replication,
            inf.getFileReplication());
}
/**
 * JDBC metadata: a result set cannot see its own updates, regardless of type.
 */
@Override
public boolean ownUpdatesAreVisible(final int type) {
    return false;
}
/**
 * ownUpdatesAreVisible must be false for any result-set type.
 */
@Test
void assertOwnUpdatesAreVisible() {
    assertFalse(metaData.ownUpdatesAreVisible(0));
}
/**
 * Template for migration commands: verifies the config file is present and
 * parses, then delegates to the subclass command with the real ksql client
 * factory. Returns 1 on any configuration error.
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }

    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        LOGGER.error(e.getMessage());
        return 1;
    }

    return command(config, MigrationsUtil::getKsqlClient);
}
/**
 * Clean must succeed (status 0) and drop the migrations objects even when the
 * server version string cannot be parsed.
 */
@Test
public void shouldCleanEvenIfCantParseServerVersion() {
    // Given:
    when(serverInfo.getServerVersion()).thenReturn("not_a_valid_version");

    // When:
    final int status = command.command(config, cfg -> client);

    // Then: running query terminated, then table and stream dropped with topics.
    assertThat(status, is(0));
    verify(client).executeStatement("TERMINATE " + CTAS_QUERY_ID + ";");
    verify(client).executeStatement("DROP TABLE " + MIGRATIONS_TABLE + " DELETE TOPIC;");
    verify(client).executeStatement("DROP STREAM " + MIGRATIONS_STREAM + " DELETE TOPIC;");
}
/**
 * Returns the hash of the file's raw source, computing and caching it on first
 * access. The (cached or fresh) value is validated by checkSourceHash before
 * being returned.
 *
 * Fix: single map lookup instead of containsKey + get, and file.getKey() is
 * read once instead of up to four times. Assumes computeRawSourceHash never
 * returns null, so a null lookup means "not cached yet" — matching the
 * original containsKey semantics for non-null values.
 */
@Override
public String getRawSourceHash(Component file) {
    checkComponentArgument(file);
    final String key = file.getKey();
    String sourceHash = rawSourceHashesByKey.get(key);
    if (sourceHash == null) {
        sourceHash = computeRawSourceHash(file);
        rawSourceHashesByKey.put(key, sourceHash);
    }
    return checkSourceHash(key, sourceHash);
}
/**
 * A null component must be rejected with an NPE carrying the standard message.
 */
@Test
void getRawSourceHash_throws_NPE_if_Component_argument_is_null() {
    assertThatThrownBy(() -> underTest.getRawSourceHash(null))
            .isInstanceOf(NullPointerException.class)
            .hasMessage("Specified component can not be null");
}
/**
 * mTLS capability filter: rejects requests without a client certificate (401);
 * in legacy mode grants READ+WRITE to any presented cert; otherwise matches
 * the leaf certificate against configured clients that hold the permission the
 * request requires, answering 403 when none match.
 */
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
    var certs = req.getClientCertificateChain();
    log.fine(() -> "Certificate chain contains %d elements".formatted(certs.size()));
    if (certs.isEmpty()) {
        log.fine("Missing client certificate");
        return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
    }
    if (legacyMode) {
        log.fine("Legacy mode validation complete");
        // Any valid cert gets full read/write access in legacy mode.
        ClientPrincipal.attachToRequest(req, Set.of(), Set.of(READ, WRITE));
        return Optional.empty();
    }
    // A request whose required permission cannot be determined is forbidden.
    var permission = Permission.getRequiredPermission(req).orElse(null);
    if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    // Only the leaf certificate is matched against the configured clients.
    var clientCert = certs.get(0);
    var clientIds = new TreeSet<String>();
    var permissions = new TreeSet<Permission>();
    for (Client c : allowedClients) {
        if (!c.permissions().contains(permission)) continue;
        if (!c.certificates().contains(clientCert)) continue;
        // Union of ids/permissions over all matching client entries.
        clientIds.add(c.id());
        permissions.addAll(c.permissions());
    }
    if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    ClientPrincipal.attachToRequest(req, clientIds, permissions);
    return Optional.empty();
}
/**
 * When the handler spec maps the request method to a custom action that no
 * configured client is granted, the filter must answer 403 Forbidden even
 * though the certificate itself is valid.
 */
@Test
void fails_on_handler_with_custom_request_spec_with_invalid_action() {
    // Spec that maps GET to the custom action 'custom' (original comment
    // incorrectly said POST/'read').
    var spec = RequestHandlerSpec.builder()
            .withAclMapping(HttpMethodAclMapping.standard()
                    .override(Method.GET, Action.custom("custom")).build())
            .build();
    var req = FilterTestUtils.newRequestBuilder()
            .withMethod(Method.GET)
            .withClientCertificate(SEARCH_CERT)
            .withAttribute(RequestHandlerSpec.ATTRIBUTE_NAME, spec)
            .build();
    var responseHandler = new MockResponseHandler();
    newFilterWithClientsConfig().filter(req, responseHandler);
    assertNotNull(responseHandler.getResponse());
    assertEquals(FORBIDDEN, responseHandler.getResponse().getStatus());
}
/**
 * Collects the SQL tokens for the outer select and for every subquery context,
 * de-duplicated while preserving insertion order.
 */
public Collection<SQLToken> generateSQLTokens(final SelectStatementContext selectStatementContext) {
    final Collection<SQLToken> result = new LinkedHashSet<>(generateSelectSQLTokens(selectStatementContext));
    // Append tokens from each subquery; the LinkedHashSet drops duplicates.
    selectStatementContext.getSubqueryContexts().values()
            .forEach(each -> result.addAll(generateSelectSQLTokens(each)));
    return result;
}
/**
 * When a projection's owner ("a") matches a table alias for the doctor table,
 * exactly one SQL token must be generated for the select statement.
 */
@Test
void assertGenerateSQLTokensWhenOwnerMatchTableAlias() {
    SimpleTableSegment doctorTable = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("doctor")));
    doctorTable.setAlias(new AliasSegment(0, 0, new IdentifierValue("a")));
    ColumnSegment column = new ColumnSegment(0, 0, new IdentifierValue("mobile"));
    column.setColumnBoundInfo(new ColumnSegmentBoundInfo(new IdentifierValue(DefaultDatabase.LOGIC_NAME),
            new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue("doctor"), new IdentifierValue("mobile")));
    // The owner "a" refers to the doctor table via its alias.
    column.setOwner(new OwnerSegment(0, 0, new IdentifierValue("a")));
    ProjectionsSegment projections = mock(ProjectionsSegment.class);
    when(projections.getProjections()).thenReturn(Collections.singleton(new ColumnProjectionSegment(column)));
    SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
    when(sqlStatementContext.getSubqueryType()).thenReturn(null);
    when(sqlStatementContext.getDatabaseType()).thenReturn(databaseType);
    when(sqlStatementContext.getSqlStatement().getProjections()).thenReturn(projections);
    when(sqlStatementContext.getSubqueryContexts().values()).thenReturn(Collections.emptyList());
    SimpleTableSegment doctorOneTable = new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("doctor1")));
    when(sqlStatementContext.getTablesContext()).thenReturn(
            new TablesContext(Arrays.asList(doctorTable, doctorOneTable), databaseType, DefaultDatabase.LOGIC_NAME));
    when(sqlStatementContext.getProjectionsContext().getProjections()).thenReturn(
            Collections.singleton(new ColumnProjection("a", "mobile", null, databaseType)));
    Collection<SQLToken> actual = generator.generateSQLTokens(sqlStatementContext);
    assertThat(actual.size(), is(1));
}
/**
 * Root constructor: stores the error code and passes the pre-formatted message
 * and cause through to Throwable. All public constructors funnel here.
 */
private XException(ERROR error, String message, Throwable cause) {
    super(message, cause);
    this.error = error;
}
/**
 * Exercises the XException constructors: message templating, cause
 * propagation, and wrapping of another XException.
 */
@Test
public void testXException() throws Exception {
    // No-arg template: message keeps the {0} placeholder.
    XException ex = new XException(TestERROR.TC);
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), "TC: {0}");
    assertNull(ex.getCause());

    ex = new XException(TestERROR.TC, "msg");
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), "TC: msg");
    assertNull(ex.getCause());

    // A Throwable argument appears in the message and becomes the cause.
    Exception cause = new Exception();
    ex = new XException(TestERROR.TC, cause);
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), "TC: " + cause.toString());
    assertEquals(ex.getCause(), cause);

    // Wrapping an XException preserves error and message; the wrapped one is the cause.
    XException xcause = ex;
    ex = new XException(xcause);
    assertEquals(ex.getError(), TestERROR.TC);
    assertEquals(ex.getMessage(), xcause.getMessage());
    assertEquals(ex.getCause(), xcause);
}
/**
 * Asserts the subject is at most the given int, delegating to the double view
 * of the subject.
 */
public final void isAtMost(int other) {
    asDouble.isAtMost(other);
}
/**
 * Float isAtMost(int): 2.0f is not at most 1, but is at most 2 and 3.
 */
@Test
public void isAtMost_int() {
    expectFailureWhenTestingThat(2.0f).isAtMost(1);
    assertThat(2.0f).isAtMost(2);
    assertThat(2.0f).isAtMost(3);
}
/**
 * Derives an annotated cluster state from the current node info: computes each
 * node's effective state, takes down groups with too-low availability, marks
 * the whole cluster Down when a cluster-level reason applies, and infers the
 * distribution bit count from the resulting state.
 */
static AnnotatedClusterState generatedStateFrom(final Params params) {
    final ContentCluster cluster = params.cluster;
    final ClusterState workingState = ClusterState.emptyState();
    final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();

    for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
        final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
        workingState.setNodeState(nodeInfo.getNode(), nodeState);
    }

    takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);

    final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
    if (reasonToBeDown.isPresent()) {
        workingState.setClusterState(State.DOWN);
    }
    // Distribution bits depend on the node states computed above.
    workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));

    return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
/**
 * With 2 of 3 distributors down and a 0.5 minimum ratio, the generated state
 * must be cluster-Down with the too-low-distributor-ratio reason.
 */
@Test
void cluster_down_if_less_than_min_ratio_of_distributors_available() {
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportDistributorNodeState(0, State.DOWN)
            .reportDistributorNodeState(2, State.DOWN);
    final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfDistributorNodesUp(0.5);
    // TODO de-dupe a lot of these tests?
    final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
    assertThat(state.toString(), equalTo("cluster:d distributor:2 .0.s:d storage:3"));
    assertThat(state.getClusterStateReason(),
            equalTo(Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO)));
}
/**
 * Convenience builder factory preconfigured with String key/value deserializers.
 */
public static Builder<String, String> builder(String bootstrapServers, String... topics) {
    return new Builder<String, String>(bootstrapServers, topics).withStringDeserializers();
}
/**
 * An explicitly configured auto.offset.reset policy must survive into the
 * final Kafka properties, overriding the recommended default.
 */
@Test
public void testWillRespectExplicitAutoOffsetResetPolicy() {
    KafkaSpoutConfig<String, String> conf = KafkaSpoutConfig.builder("localhost:1234", "topic")
            .setProp(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none")
            .build();
    assertThat("Should allow users to pick a different auto offset reset policy than the one recommended for the at-least-once processing guarantee",
            conf.getKafkaProps().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG),
            is("none"));
}
/**
 * Returns a logger named after the given class; null classes are rejected
 * by checkNotNull.
 */
public static ILogger getLogger(@Nonnull Class<?> clazz) {
    checkNotNull(clazz, "class must not be null");
    final String loggerName = clazz.getName();
    return getLoggerInternal(loggerName);
}
/**
 * With the logging type forced to log4j, Logger.getLogger must hand out a
 * Log4jFactory.Log4jLogger instance.
 */
@Test
public void getLogger_thenLog4j_thenReturnLog4jLogger() {
    isolatedLoggingRule.setLoggingType(LOGGING_TYPE_LOG4J);
    assertInstanceOf(Log4jFactory.Log4jLogger.class, Logger.getLogger(getClass()));
}
/**
 * Executes a DDL command, delegating to the five-argument overload and passing
 * {@code false} for its final flag (semantics defined by that overload — TODO
 * confirm against the full class).
 */
public DdlCommandResult execute(
    final String sql,
    final DdlCommand ddlCommand,
    final boolean withQuery,
    final Set<SourceName> withQuerySources
) {
    return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
@Test public void shouldThrowOnAddExistingColumn() { // Given: givenCreateStream(); cmdExec.execute(SQL_TEXT, createStream, false, NO_QUERY_SOURCES); alterSource = new AlterSourceCommand(STREAM_NAME, DataSourceType.KSTREAM.getKsqlType(), SCHEMA2.columns()); // When: final KsqlException e = assertThrows(KsqlException.class, () -> cmdExec.execute(SQL_TEXT, alterSource, false, NO_QUERY_SOURCES)); // Then: assertThat(e.getMessage(), is("Cannot add column `F1` to schema. A column with the same name already exists.")); }
/**
 * Creates a counter of the requested boxed numeric type in the given unit.
 *
 * <p>Only {@link Integer} and {@link Long} are supported; any other type is
 * rejected with {@link IllegalArgumentException}. Each call returns a fresh
 * counter backed by a new {@code DefaultCounter}.
 *
 * @param name counter name — NOTE(review): currently unused by this
 *     implementation; TODO confirm that is intentional
 * @param type {@code Integer.class} or {@code Long.class}
 * @param unit unit the counter reports in
 * @deprecated typed counters are deprecated — presumably superseded by another
 *     interface method; TODO confirm replacement
 */
@Override
@Deprecated
@SuppressWarnings("unchecked")
public <T extends Number> Counter<T> counter(String name, Class<T> type, Unit unit) {
    if (Integer.class.equals(type)) {
        return (Counter<T>) new DefaultCounter(unit).asIntCounter();
    }
    if (Long.class.equals(type)) {
        return (Counter<T>) new DefaultCounter(unit).asLongCounter();
    }
    throw new IllegalArgumentException(
        String.format("Counter for type %s is not supported", type.getName()));
}
@Test public void intCounterOverflow() { MetricsContext metricsContext = new DefaultMetricsContext(); MetricsContext.Counter<Integer> counter = metricsContext.counter("test", Integer.class, MetricsContext.Unit.COUNT); counter.increment(Integer.MAX_VALUE); counter.increment(); assertThatThrownBy(counter::value) .isInstanceOf(ArithmeticException.class) .hasMessage("integer overflow"); }
/**
 * Registers the RocksDB "number-of-live-versions" gauge with the given streams
 * metrics registry, scoped by the supplied metric context.
 */
public static void addNumLiveVersionMetric(final StreamsMetricsImpl streamsMetrics,
                                           final RocksDBMetricContext metricContext,
                                           final Gauge<BigInteger> valueProvider) {
    addMutableMetric(
        streamsMetrics,
        metricContext,
        valueProvider,
        NUMBER_OF_LIVE_VERSIONS,
        NUMBER_OF_LIVE_VERSIONS_DESCRIPTION
    );
}
@Test public void shouldAddNumLiveVersionMetric() { final String name = "num-live-versions"; final String description = "Number of live versions of the LSM tree"; runAndVerifyMutableMetric( name, description, () -> RocksDBMetrics.addNumLiveVersionMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER) ); }
/**
 * Fails fast on startup if any configured health check is not registered:
 * delegates to {@code validateConfiguration} with the configured checks and the
 * names currently present in the registry.
 */
@Override
public void start() throws Exception {
    validateConfiguration(configs, registry.getNames());
}
@Test void startValidationsShouldFailIfAHealthCheckConfiguredButNotRegistered() throws Exception { // given ArgumentCaptor<LoggingEvent> captor = ArgumentCaptor.forClass(LoggingEvent.class); List<HealthCheckConfiguration> configs = new ArrayList<>(); HealthCheckConfiguration check1 = new HealthCheckConfiguration(); check1.setName("check-1"); configs.add(check1); HealthCheckConfiguration check2 = new HealthCheckConfiguration(); check2.setName("check-2"); configs.add(check2); HealthCheckConfiguration check3 = new HealthCheckConfiguration(); check3.setName("check-3"); configs.add(check3); HealthCheckRegistry registry = new HealthCheckRegistry(); registry.register("check-1", mock(HealthCheck.class)); // when try { HealthCheckConfigValidator validator = new HealthCheckConfigValidator(unmodifiableList(configs), registry); validator.start(); fail("configured health checks that aren't registered should fail"); } catch (IllegalStateException e) { // then verify(mockLogAppender).doAppend(captor.capture()); LoggingEvent logEvent = captor.getValue(); assertThat(logEvent.getLevel()) .isEqualTo(Level.ERROR); assertThat(logEvent.getFormattedMessage()) .doesNotContain(" * check-1") .contains(" * check-2\n * check-3"); assertThat(e.getMessage()) .contains("[check-2, check-3]"); } }
/**
 * Looks up the static-table index for the given header name.
 *
 * @return the table index, or {@code NOT_FOUND} when the name is absent
 */
static int getIndex(CharSequence name) {
    final HeaderNameIndex entry = getEntry(name);
    if (entry == null) {
        return NOT_FOUND;
    }
    return entry.index;
}
// Verifies that a name absent from the HPACK static table yields the
// not-found sentinel (-1).
@Test
public void testMissingHeaderName() {
    assertEquals(-1, HpackStaticTable.getIndex("missing"));
}
/**
 * Coerces {@code value} into a {@link BigDecimal} array.
 *
 * <p>Accepts a {@code BigDecimal[]} (returned as-is), a {@code double[]}
 * (element-wise converted), or a {@link List} whose elements are converted via
 * their string form. Any other type, or an unparseable element, raises a
 * {@code MaestroInternalError} carrying the (truncated) offending value and the
 * parameter name.
 *
 * @param name parameter name, used only for error messages
 * @param value the raw value to convert
 */
public static BigDecimal[] toDecimalArray(String name, Object value) {
  try {
    if (value instanceof BigDecimal[]) {
      return (BigDecimal[]) value;
    } else if (value instanceof double[]) {
      // BigDecimal.valueOf(double) is specified as new BigDecimal(Double.toString(d)),
      // identical to the previous new BigDecimal(String.valueOf(val)) but clearer
      // and able to reuse cached instances for common values.
      return Arrays.stream((double[]) value)
          .mapToObj(BigDecimal::valueOf)
          .toArray(BigDecimal[]::new);
    } else if (value instanceof List) {
      // List elements may be of mixed types; go through their string form.
      return ((List<?>) value)
          .stream().map(d -> new BigDecimal(String.valueOf(d))).toArray(BigDecimal[]::new);
    } else {
      throw new MaestroInternalError(
          "Cannot cast value [%s] into a BigDecimal array for param [%s]",
          toTruncateString(value), name);
    }
  } catch (NumberFormatException nfe) {
    throw new MaestroInternalError(
        nfe, "Invalid number format for value: %s for param [%s]", toTruncateString(value), name);
  }
}
@Test public void testDoubleArrayToDecimalArray() { Object val = new double[] {1.2, 3.4, 5.6}; BigDecimal[] actual = ParamHelper.toDecimalArray("foo", val); assertEquals(1.2, actual[0].doubleValue(), 0.00000000); assertEquals(3.4, actual[1].doubleValue(), 0.00000000); assertEquals(5.6, actual[2].doubleValue(), 0.00000000); }
/**
 * Synchronously initializes the Bloom filter by blocking on the async variant.
 *
 * @return the result of {@code tryInitAsync} — presumably true when the filter
 *     was newly initialized; TODO confirm against the async contract
 */
@Override
public boolean tryInit(long expectedInsertions, double falseProbability) {
    return get(tryInitAsync(expectedInsertions, falseProbability));
}
@Test public void testFalseProbability1() { Assertions.assertThrows(IllegalArgumentException.class, () -> { RBloomFilter<String> filter = redisson.getBloomFilter("filter"); filter.tryInit(1, -1); }); }
/**
 * Two ranges are equal when both boundary markers and both endpoint values
 * match; null endpoints only match null endpoints.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof RangeImpl)) {
        return false;
    }
    final RangeImpl other = (RangeImpl) o;
    if (lowBoundary != other.lowBoundary || highBoundary != other.highBoundary) {
        return false;
    }
    final boolean lowMatches =
            lowEndPoint == null ? other.lowEndPoint == null : lowEndPoint.equals(other.lowEndPoint);
    if (!lowMatches) {
        return false;
    }
    return highEndPoint == null ? other.highEndPoint == null : highEndPoint.equals(other.highEndPoint);
}
@Test void equals() { RangeImpl rangeImpl = new RangeImpl(Range.RangeBoundary.OPEN, 10, 15, Range.RangeBoundary.OPEN); assertThat(rangeImpl).isEqualTo(rangeImpl); RangeImpl rangeImpl2 = new RangeImpl(Range.RangeBoundary.OPEN, 10, 15, Range.RangeBoundary.OPEN); assertThat(rangeImpl2).isEqualTo(rangeImpl); rangeImpl2 = new RangeImpl(Range.RangeBoundary.OPEN, 10, 15, Range.RangeBoundary.CLOSED); assertThat(rangeImpl2).isNotEqualTo(rangeImpl); rangeImpl2 = new RangeImpl(Range.RangeBoundary.CLOSED, 10, 15, Range.RangeBoundary.OPEN); assertThat(rangeImpl2).isNotEqualTo(rangeImpl); rangeImpl2 = new RangeImpl(Range.RangeBoundary.CLOSED, 10, 15, Range.RangeBoundary.CLOSED); assertThat(rangeImpl2).isNotEqualTo(rangeImpl); rangeImpl2 = new RangeImpl(Range.RangeBoundary.CLOSED, 12, 15, Range.RangeBoundary.CLOSED); assertThat(rangeImpl2).isNotEqualTo(rangeImpl); rangeImpl2 = new RangeImpl(Range.RangeBoundary.CLOSED, 12, 17, Range.RangeBoundary.CLOSED); assertThat(rangeImpl2).isNotEqualTo(rangeImpl); rangeImpl = new RangeImpl(); assertThat(rangeImpl).isEqualTo(rangeImpl); }
/**
 * Renders a {@code JobId} as its canonical string form.
 *
 * @throws NullPointerException if {@code jid} is null
 */
public static String toString(JobId jid) {
    return jid.toString();
}
@Test @Timeout(120000) public void testTaskAttemptIDtoString() { TaskAttemptId taid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class); taid.setTaskId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class)); taid.getTaskId().setTaskType(TaskType.MAP); taid.getTaskId() .setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class)); taid.getTaskId().getJobId().setAppId(ApplicationId.newInstance(0, 0)); assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid)); }
/**
 * Human-readable dump of all ARP header fields. Numeric fields are rendered in
 * decimal; hardware addresses via {@code MacAddress} and protocol addresses via
 * {@code Ip4Address}.
 *
 * <p>NOTE(review): rendering protocol addresses with Ip4Address assumes 4-byte
 * (IPv4) protocol addresses — TODO confirm other address lengths never reach here.
 */
@Override
public String toString() {
    return toStringHelper(getClass())
            .add("hardwareType", Short.toString(hardwareType))
            .add("protocolType", Short.toString(protocolType))
            .add("hardwareAddressLength", Byte.toString(hardwareAddressLength))
            .add("protocolAddressLength", Byte.toString(protocolAddressLength))
            .add("opCode", Short.toString(opCode))
            .add("senderHardwareAddress", MacAddress.valueOf(senderHardwareAddress))
            .add("senderProtocolAddress", Ip4Address.valueOf(senderProtocolAddress))
            .add("targetHardwareAddress", MacAddress.valueOf(targetHardwareAddress))
            .add("targetProtocolAddress", Ip4Address.valueOf(targetProtocolAddress))
            .toString();
}
@Test public void testToStringArp() throws Exception { ARP arp = deserializer.deserialize(byteHeader, 0, byteHeader.length); String str = arp.toString(); assertTrue(StringUtils.contains(str, "hardwareAddressLength=" + hwAddressLength)); assertTrue(StringUtils.contains(str, "protocolAddressLength=" + protoAddressLength)); assertTrue(StringUtils.contains(str, "senderHardwareAddress=" + srcMac)); assertTrue(StringUtils.contains(str, "senderProtocolAddress=" + srcIp)); assertTrue(StringUtils.contains(str, "targetHardwareAddress=" + targetMac)); assertTrue(StringUtils.contains(str, "targetProtocolAddress=" + targetIp)); }
/**
 * Sets the target hostname used for verification. {@code target} is presumably
 * a thread-local holder given the class name — TODO confirm.
 */
public void setTarget(final String hostname) {
    this.target.set(hostname);
}
@Test public void testSetTarget() { assertEquals("s3.amazonaws.com", new ThreadLocalHostnameDelegatingTrustManager(new DisabledX509TrustManager(), "s3.amazonaws.com").getTarget()); assertEquals("cyberduck.s3.amazonaws.com", new ThreadLocalHostnameDelegatingTrustManager(new DisabledX509TrustManager(), "cyberduck.s3.amazonaws.com").getTarget()); assertEquals("cyber.duck.s3.amazonaws.com", new ThreadLocalHostnameDelegatingTrustManager(new DisabledX509TrustManager(), "cyber.duck.s3.amazonaws.com").getTarget()); }
/**
 * Infers the widest common data type of {@code lType} and {@code rType}.
 *
 * <p>Nullability is factored out first: the result is nullable iff either input
 * is nullable. Identical types merge to themselves; integer types widen to
 * BIGINT, character strings to STRING, approximate numerics to DOUBLE. Two
 * decimals merge so the result holds both operands' integer digits and scale;
 * a decimal merged with an exact numeric widens precision so the integer's
 * digits fit above the decimal's scale.
 *
 * @throws IllegalStateException if the two types have no common wider type
 */
@VisibleForTesting
public static DataType inferWiderType(DataType lType, DataType rType) {
    // Ignore nullability during data type merge
    boolean nullable = lType.isNullable() || rType.isNullable();
    lType = lType.notNull();
    rType = rType.notNull();
    DataType mergedType;
    if (lType.equals(rType)) {
        // identical type
        mergedType = rType;
    } else if (lType.is(DataTypeFamily.INTEGER_NUMERIC)
            && rType.is(DataTypeFamily.INTEGER_NUMERIC)) {
        mergedType = DataTypes.BIGINT();
    } else if (lType.is(DataTypeFamily.CHARACTER_STRING)
            && rType.is(DataTypeFamily.CHARACTER_STRING)) {
        mergedType = DataTypes.STRING();
    } else if (lType.is(DataTypeFamily.APPROXIMATE_NUMERIC)
            && rType.is(DataTypeFamily.APPROXIMATE_NUMERIC)) {
        mergedType = DataTypes.DOUBLE();
    } else if (lType.is(DataTypeRoot.DECIMAL) && rType.is(DataTypeRoot.DECIMAL)) {
        // Merge two decimal types: keep the max scale and enough integer digits
        // for either operand.
        DecimalType lhsDecimal = (DecimalType) lType;
        DecimalType rhsDecimal = (DecimalType) rType;
        int resultIntDigits =
                Math.max(
                        lhsDecimal.getPrecision() - lhsDecimal.getScale(),
                        rhsDecimal.getPrecision() - rhsDecimal.getScale());
        int resultScale = Math.max(lhsDecimal.getScale(), rhsDecimal.getScale());
        mergedType = DataTypes.DECIMAL(resultIntDigits + resultScale, resultScale);
    } else if (lType.is(DataTypeRoot.DECIMAL) && rType.is(DataTypeFamily.EXACT_NUMERIC)) {
        // Merge decimal and int: widen precision so the integer's digits fit
        // above the decimal's scale.
        DecimalType lhsDecimal = (DecimalType) lType;
        mergedType =
                DataTypes.DECIMAL(
                        Math.max(
                                lhsDecimal.getPrecision(),
                                lhsDecimal.getScale() + getNumericPrecision(rType)),
                        lhsDecimal.getScale());
    } else if (rType.is(DataTypeRoot.DECIMAL) && lType.is(DataTypeFamily.EXACT_NUMERIC)) {
        // Merge decimal and int (mirror of the branch above).
        DecimalType rhsDecimal = (DecimalType) rType;
        mergedType =
                DataTypes.DECIMAL(
                        Math.max(
                                rhsDecimal.getPrecision(),
                                rhsDecimal.getScale() + getNumericPrecision(lType)),
                        rhsDecimal.getScale());
    } else {
        throw new IllegalStateException(
                String.format("Incompatible types: \"%s\" and \"%s\"", lType, rType));
    }
    // Re-apply the nullability computed before the merge.
    if (nullable) {
        return mergedType.nullable();
    } else {
        return mergedType.notNull();
    }
}
@Test public void testInferWiderType() { Assertions.assertThat( SchemaUtils.inferWiderType(DataTypes.BINARY(17), DataTypes.BINARY(17))) .isEqualTo(DataTypes.BINARY(17)); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.VARBINARY(17), DataTypes.VARBINARY(17))) .isEqualTo(DataTypes.VARBINARY(17)); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.BYTES(), DataTypes.BYTES())) .isEqualTo(DataTypes.BYTES()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.BOOLEAN(), DataTypes.BOOLEAN())) .isEqualTo(DataTypes.BOOLEAN()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.INT(), DataTypes.INT())) .isEqualTo(DataTypes.INT()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.TINYINT(), DataTypes.TINYINT())) .isEqualTo(DataTypes.TINYINT()); Assertions.assertThat( SchemaUtils.inferWiderType(DataTypes.SMALLINT(), DataTypes.SMALLINT())) .isEqualTo(DataTypes.SMALLINT()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.BIGINT(), DataTypes.BIGINT())) .isEqualTo(DataTypes.BIGINT()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.FLOAT(), DataTypes.FLOAT())) .isEqualTo(DataTypes.FLOAT()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.DOUBLE(), DataTypes.DOUBLE())) .isEqualTo(DataTypes.DOUBLE()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.CHAR(17), DataTypes.CHAR(17))) .isEqualTo(DataTypes.CHAR(17)); Assertions.assertThat( SchemaUtils.inferWiderType(DataTypes.VARCHAR(17), DataTypes.VARCHAR(17))) .isEqualTo(DataTypes.VARCHAR(17)); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.STRING(), DataTypes.STRING())) .isEqualTo(DataTypes.STRING()); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.DECIMAL(17, 7), DataTypes.DECIMAL(17, 7))) .isEqualTo(DataTypes.DECIMAL(17, 7)); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.DATE(), DataTypes.DATE())) .isEqualTo(DataTypes.DATE()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.TIME(), 
DataTypes.TIME())) .isEqualTo(DataTypes.TIME()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.TIME(6), DataTypes.TIME(6))) .isEqualTo(DataTypes.TIME(6)); Assertions.assertThat( SchemaUtils.inferWiderType(DataTypes.TIMESTAMP(), DataTypes.TIMESTAMP())) .isEqualTo(DataTypes.TIMESTAMP()); Assertions.assertThat( SchemaUtils.inferWiderType(DataTypes.TIMESTAMP(3), DataTypes.TIMESTAMP(3))) .isEqualTo(DataTypes.TIMESTAMP(3)); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.TIMESTAMP_TZ(), DataTypes.TIMESTAMP_TZ())) .isEqualTo(DataTypes.TIMESTAMP_TZ()); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.TIMESTAMP_TZ(3), DataTypes.TIMESTAMP_TZ(3))) .isEqualTo(DataTypes.TIMESTAMP_TZ(3)); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.TIMESTAMP_LTZ(), DataTypes.TIMESTAMP_LTZ())) .isEqualTo(DataTypes.TIMESTAMP_LTZ()); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.TIMESTAMP_LTZ(3), DataTypes.TIMESTAMP_LTZ(3))) .isEqualTo(DataTypes.TIMESTAMP_LTZ(3)); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.ARRAY(DataTypes.INT()), DataTypes.ARRAY(DataTypes.INT()))) .isEqualTo(DataTypes.ARRAY(DataTypes.INT())); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.MAP(DataTypes.INT(), DataTypes.STRING()), DataTypes.MAP(DataTypes.INT(), DataTypes.STRING()))) .isEqualTo(DataTypes.MAP(DataTypes.INT(), DataTypes.STRING())); // Test compatible widening cast Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.INT(), DataTypes.BIGINT())) .isEqualTo(DataTypes.BIGINT()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.VARCHAR(17), DataTypes.STRING())) .isEqualTo(DataTypes.STRING()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.FLOAT(), DataTypes.DOUBLE())) .isEqualTo(DataTypes.DOUBLE()); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.INT(), DataTypes.DECIMAL(4, 0))) .isEqualTo(DataTypes.DECIMAL(10, 0)); Assertions.assertThat(SchemaUtils.inferWiderType(DataTypes.INT(), 
DataTypes.DECIMAL(10, 5))) .isEqualTo(DataTypes.DECIMAL(15, 5)); Assertions.assertThat( SchemaUtils.inferWiderType(DataTypes.BIGINT(), DataTypes.DECIMAL(10, 5))) .isEqualTo(DataTypes.DECIMAL(24, 5)); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.DECIMAL(5, 4), DataTypes.DECIMAL(10, 2))) .isEqualTo(DataTypes.DECIMAL(12, 4)); // Test merging with nullability Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.INT().notNull(), DataTypes.INT().notNull())) .isEqualTo(DataTypes.INT().notNull()); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.INT().nullable(), DataTypes.INT().notNull())) .isEqualTo(DataTypes.INT().nullable()); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.INT().notNull(), DataTypes.INT().nullable())) .isEqualTo(DataTypes.INT().nullable()); Assertions.assertThat( SchemaUtils.inferWiderType( DataTypes.INT().nullable(), DataTypes.INT().nullable())) .isEqualTo(DataTypes.INT().nullable()); // incompatible type merges test Assertions.assertThatThrownBy( () -> SchemaUtils.inferWiderType(DataTypes.INT(), DataTypes.DOUBLE())) .isExactlyInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy( () -> SchemaUtils.inferWiderType( DataTypes.DECIMAL(17, 0), DataTypes.DOUBLE())) .isExactlyInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy( () -> SchemaUtils.inferWiderType(DataTypes.INT(), DataTypes.STRING())) .isExactlyInstanceOf(IllegalStateException.class); }
/**
 * Ensures that {@code element} is absent from {@code values}.
 *
 * @param values collection to inspect
 * @param element element that must not be present
 * @param exceptionSupplierIfUnexpected supplies the throwable raised when the
 *     element is unexpectedly present
 * @throws T when {@code values} contains {@code element}
 */
public static <T extends Throwable> void checkNotContains(final Collection<?> values, final Object element, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
    final boolean present = values.contains(element);
    if (!present) {
        return;
    }
    throw exceptionSupplierIfUnexpected.get();
}
@Test void assertCheckNotContainsToThrowsException() { assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkNotContains(Collections.singleton("foo"), "foo", SQLException::new)); }
/**
 * Builds a closed range, tolerating endpoints of different Comparable classes.
 *
 * <p>Guava's {@code Range.closed} throws {@link ClassCastException} when the
 * endpoints are not mutually comparable (e.g. Integer vs Long); in that case
 * both endpoints are re-parsed into the common numeric class determined by
 * {@code getTargetNumericType}. If no common numeric type exists, the original
 * exception is rethrown.
 */
public static Range<Comparable<?>> safeClosed(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
    try {
        return Range.closed(lowerEndpoint, upperEndpoint);
    } catch (final ClassCastException ex) {
        Optional<Class<?>> clazz = getTargetNumericType(Arrays.asList(lowerEndpoint, upperEndpoint));
        if (!clazz.isPresent()) {
            throw ex;
        }
        return Range.closed(parseNumberByClazz(lowerEndpoint.toString(), clazz.get()), parseNumberByClazz(upperEndpoint.toString(), clazz.get()));
    }
}
@Test void assertSafeClosedForFloat() { Range<Comparable<?>> range = SafeNumberOperationUtils.safeClosed(4.5F, 11.13F); assertThat(range.lowerEndpoint(), is(4.5F)); assertThat(range.upperEndpoint(), is(11.13F)); }
/**
 * Validates {@code options} against the constraints declared on {@code klass};
 * delegates to the three-argument overload, passing {@code false} for its final
 * flag (see that overload for the flag's semantics — TODO confirm).
 */
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) {
    return validate(klass, options, false);
}
@Test public void testWhenNoneOfRequiredGroupIsSetThrowsException() { GroupRequired groupRequired = PipelineOptionsFactory.as(GroupRequired.class); groupRequired.setRunner(CrashingRunner.class); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Missing required value for group [ham]"); expectedException.expectMessage("properties"); expectedException.expectMessage("getFoo"); expectedException.expectMessage("getBar"); PipelineOptionsValidator.validate(GroupRequired.class, groupRequired); }
/** Returns this object's configured name. */
public String getName() {
    return name;
}
@Test void hasAName() throws Exception { assertThat(task.getName()) .isEqualTo("test"); }
/**
 * Flattens a {@code TableRow} into a single {@code GenericRow} laid out as:
 * the existing value columns, then ROWTIME, then the key columns, then (for
 * windowed rows) the window start and end as epoch millis.
 *
 * <p>NOTE(review): this appends in place to {@code row.value()} and returns the
 * same instance — the input row's value is mutated, not copied.
 */
public static GenericRow getIntermediateRow(final TableRow row) {
    final GenericKey key = row.key();
    final GenericRow value = row.value();
    final List<?> keyFields = key.values();
    // Reserve capacity up front: ROWTIME + all key fields + optionally two window bounds.
    value.ensureAdditionalCapacity(
        1 // ROWTIME
            + keyFields.size() //all the keys
            + row.window().map(w -> 2).orElse(0) //windows
    );
    value.append(row.rowTime());
    value.appendAll(keyFields);
    row.window().ifPresent(window -> {
        value.append(window.start().toEpochMilli());
        value.append(window.end().toEpochMilli());
    });
    return value;
}
@Test public void shouldReturnIntermediateRowWindowed() { // Given: final GenericRow intermediateRow1 = aValue .append(aRowtime) .append(aKey) .append(aWindow.start().toEpochMilli()) .append(aWindow.end().toEpochMilli()); final GenericRow intermediateRow2 = aValue2 .append(aRowtime) .append(aKey2) .append(aWindow.start().toEpochMilli()) .append(aWindow.end().toEpochMilli()); // When: final GenericRow genericRow1 = KsqlMaterialization.getIntermediateRow(windowedRow); final GenericRow genericRow2 = KsqlMaterialization.getIntermediateRow(windowedRow2); // Then: assertThat(genericRow1, is(intermediateRow1)); assertThat(genericRow2, is(intermediateRow2)); }
/**
 * Runs one preemption editing pass: refreshes configuration if needed,
 * snapshots the cluster resource, and applies container-based preempt-or-kill
 * starting from the root queue. Synchronized so passes never overlap.
 */
@Override
public synchronized void editSchedule() {
    updateConfigIfNeeded();
    long startTs = clock.getTime();
    CSQueue root = scheduler.getRootQueue();
    // Clone so the pass works on a stable snapshot of the cluster resource.
    Resource clusterResources = Resources.clone(scheduler.getClusterResource());
    containerBasedPreemptOrKill(root, clusterResources);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
    }
}
@Test public void testPerQueueDisablePreemptionHierarchical() { int[][] qData = new int[][] { // / A D // B C E F { 200, 100, 50, 50, 100, 10, 90 }, // abs { 200, 200, 200, 200, 200, 200, 200 }, // maxCap { 200, 110, 60, 50, 90, 90, 0 }, // used { 10, 0, 0, 0, 10, 0, 10 }, // pending { 0, 0, 0, 0, 0, 0, 0 }, // reserved // appA appB appC appD { 4, 2, 1, 1, 2, 1, 1 }, // apps { -1, -1, 1, 1, -1, 1, 1 }, // req granularity { 2, 2, 0, 0, 2, 0, 0 }, // subqueues }; ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); policy.editSchedule(); // verify capacity taken from queueB (appA), not queueE (appC) despite // queueE being far over its absolute capacity because queueA (queueB's // parent) is over capacity and queueD (queueE's parent) is not. ApplicationAttemptId expectedAttemptOnQueueB = ApplicationAttemptId.newInstance( appA.getApplicationId(), appA.getAttemptId()); assertTrue("appA should be running on queueB", mCS.getAppsInQueue("queueB").contains(expectedAttemptOnQueueB)); verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA))); // Need to call setup() again to reset mDisp setup(); // Turn off preemption for queueB and it's children conf.setPreemptionDisabled(QUEUE_A_QUEUE_B, true); ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData); policy2.editSchedule(); ApplicationAttemptId expectedAttemptOnQueueC = ApplicationAttemptId.newInstance( appB.getApplicationId(), appB.getAttemptId()); ApplicationAttemptId expectedAttemptOnQueueE = ApplicationAttemptId.newInstance( appC.getApplicationId(), appC.getAttemptId()); // Now, all of queueB's (appA) over capacity is not preemptable, so neither // is queueA's. Verify that capacity is taken from queueE (appC). 
assertTrue("appB should be running on queueC", mCS.getAppsInQueue("queueC").contains(expectedAttemptOnQueueC)); assertTrue("appC should be running on queueE", mCS.getAppsInQueue("queueE").contains(expectedAttemptOnQueueE)); // Resources should have come from queueE (appC) and neither of queueA's // children. verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC))); }
/**
 * Copies each source resource to the destination at the same index.
 *
 * <p>The two lists are validated to pair up first. The FileSystem is resolved
 * from the scheme of the first source — assumes all sources share that scheme;
 * TODO confirm validateSrcDestLists enforces this. Sources filtered out by the
 * supplied MoveOptions (e.g. IGNORE_MISSING_FILES) are skipped along with their
 * paired destinations.
 */
public static void copy(
    List<ResourceId> srcResourceIds,
    List<ResourceId> destResourceIds,
    MoveOptions... moveOptions)
    throws IOException {
    validateSrcDestLists(srcResourceIds, destResourceIds);
    if (srcResourceIds.isEmpty()) {
        // Nothing to copy (and no scheme to resolve a FileSystem from).
        return;
    }
    FileSystem fileSystem = getFileSystemInternal(srcResourceIds.iterator().next().getScheme());
    FilterResult filtered = filterFiles(fileSystem, srcResourceIds, destResourceIds, moveOptions);
    if (!filtered.resultSources.isEmpty()) {
        fileSystem.copy(filtered.resultSources, filtered.resultDestinations);
    }
}
@Test public void testCopyIgnoreMissingFiles() throws Exception { Path srcPath1 = temporaryFolder.newFile().toPath(); Path nonExistentPath = srcPath1.resolveSibling("non-existent"); Path srcPath3 = temporaryFolder.newFile().toPath(); Path destPath1 = srcPath1.resolveSibling("dest1"); Path destPath2 = nonExistentPath.resolveSibling("dest2"); Path destPath3 = srcPath1.resolveSibling("dest3"); createFileWithContent(srcPath1, "content1"); createFileWithContent(srcPath3, "content3"); FileSystems.copy( toResourceIds( ImmutableList.of(srcPath1, nonExistentPath, srcPath3), false /* isDirectory */), toResourceIds(ImmutableList.of(destPath1, destPath2, destPath3), false /* isDirectory */), MoveOptions.StandardMoveOptions.IGNORE_MISSING_FILES); assertTrue(srcPath1.toFile().exists()); assertTrue(srcPath3.toFile().exists()); assertThat( Files.readLines(srcPath1.toFile(), StandardCharsets.UTF_8), containsInAnyOrder("content1")); assertFalse(destPath2.toFile().exists()); assertThat( Files.readLines(srcPath3.toFile(), StandardCharsets.UTF_8), containsInAnyOrder("content3")); }
/**
 * Delegates encoding to the wrapped codec while recording metrics: bumps the
 * encode input counter, then times the delegated call.
 */
@Override
public void encode(final Event event, final OutputStream out) throws IOException {
    encodeMetricIn.increment();
    encodeMetricTime.time(() -> codec.encode(event, out));
}
@Test public void encodeIncrementsEventCount() throws IOException { codec = new AbstractCodec() { @Override public void encode(final Event event, final OutputStream out) {} }; final JavaCodecDelegator codecDelegator = constructCodecDelegator(); codecDelegator.encode(new org.logstash.Event(), new ByteArrayOutputStream()); assertEquals(1, getMetricLongValue("encode", "writes_in")); }
/** Builds an ApplicationId from the tenant/application/instance names carried by the given config. */
public static ApplicationId from(ApplicationIdConfig config) {
    return from(TenantName.from(config.tenant()), ApplicationName.from(config.application()), InstanceName.from(config.instance()));
}
@Test void require_that_compare_to_is_correct() { new TotalOrderTester<ApplicationId>() .theseObjects(from("tenant1", "name1", "instance1"), from("tenant1", "name1", "instance1")) .areLessThan(from("tenant2", "name1", "instance1")) .areLessThan(from("tenant2", "name2", "instance1")) .areLessThan(from("tenant2", "name2", "instance2")) .testOrdering(); }
/**
 * Identifies this task: the service-meta key derived from the client id and its
 * ephemeral flag rendered as "true"/"false".
 */
@Override
public String taskKey() {
    return KeyBuilder.buildServiceMetaKey(client.getClientId(), String.valueOf(client.isEphemeral()));
}
@Test void testTaskKey() { assertEquals(KeyBuilder.buildServiceMetaKey(CLIENT_ID, "true"), beatCheckTask.taskKey()); }
/**
 * Returns the workspace roles of the given user.
 *
 * <p>Access is gated on the request carrying the privileged HPMS token; a blank
 * or mismatching token yields an error response without role data.
 *
 * @param username the user whose workspace roles are requested
 */
@RequestMapping(path = "getUserRole", method = RequestMethod.GET)
public Message getWorkspaceUserRole(@RequestParam(name = "userName") String username) {
    String token = ModuleUserUtils.getToken(httpServletRequest);
    if (StringUtils.isBlank(token)) {
        return Message.error("User:" + username + " has no permission to get user info.");
    }
    if (!token.equals(HPMS_USER_TOKEN)) {
        // SECURITY: do not echo the presented token back in the response — it may
        // be a valid credential elsewhere and would leak into clients and logs.
        // NOTE(review): consider a constant-time comparison for the token check.
        return Message.error("Token has no permission to get user info.");
    }
    List<DSSWorkspaceRoleVO> userRoles = dssWorkspaceUserService.getUserRoleByUserName(username);
    return Message.ok().data("userName", username).data("roleInfo", userRoles);
}
// NOTE(review): this "second" test only re-runs getWorkspaceUserRole() and adds
// no new assertions; it relies on @Order for sequencing. Consider removing it
// or giving it a distinct scenario.
@Order(3)
@Test
void getWorkspaceUserRole2() {
    getWorkspaceUserRole();
}
/**
 * Handles a newly added switch port by synthesizing a host entry from the
 * corresponding OpenStack (Neutron) port.
 *
 * <p>Bails out with a warning when the Neutron port, its network, or its fixed
 * IPs cannot be resolved. Otherwise builds a host description keyed by the
 * port's MAC address, annotated with network/port ids, a creation timestamp,
 * and (for non-FLAT networks) the provider segment id; then either adds the new
 * location to an existing host or reports the host to the provider service.
 */
void processPortAdded(Port port) {
    // TODO check the node state is COMPLETE
    org.openstack4j.model.network.Port osPort = osNetworkService.port(port);
    if (osPort == null) {
        log.warn(ERR_ADD_HOST + "OpenStack port for {} not found", port);
        return;
    }
    Network osNet = osNetworkService.network(osPort.getNetworkId());
    if (osNet == null) {
        log.warn(ERR_ADD_HOST + "OpenStack network {} not found", osPort.getNetworkId());
        return;
    }
    if (osPort.getFixedIps().isEmpty()) {
        log.warn(ERR_ADD_HOST + "no fixed IP for port {}", osPort.getId());
        return;
    }
    MacAddress mac = MacAddress.valueOf(osPort.getMacAddress());
    HostId hostId = HostId.hostId(mac);
    /* typically one openstack port should only be bound to one fix IP address;
       however, openstack4j binds multiple fixed IPs to one port, this might be
       a defect of openstack4j implementation */
    // TODO: we need to find a way to bind multiple ports from multiple
    // openstack networks into one host sooner or later
    Set<IpAddress> fixedIps = osPort.getFixedIps().stream()
            .map(ip -> IpAddress.valueOf(ip.getIpAddress()))
            .collect(Collectors.toSet());
    // connect point is the combination of switch ID with port number where
    // the host is attached to
    ConnectPoint connectPoint = new ConnectPoint(port.element().id(), port.number());
    long createTime = System.currentTimeMillis();
    // we check whether the host already attached to same locations
    Host host = hostService.getHost(hostId);
    // build host annotations to include a set of meta info from neutron
    DefaultAnnotations.Builder annotations = DefaultAnnotations.builder()
            .set(ANNOTATION_NETWORK_ID, osPort.getNetworkId())
            .set(ANNOTATION_PORT_ID, osPort.getId())
            .set(ANNOTATION_CREATE_TIME, String.valueOf(createTime));
    // FLAT typed network does not require segment ID
    Type netType = osNetworkService.networkType(osNet.getId());
    if (netType != FLAT) {
        annotations.set(ANNOTATION_SEGMENT_ID, osNet.getProviderSegID());
    }
    // build host description object
    HostDescription hostDesc = new DefaultHostDescription(
            mac,
            VlanId.NONE,
            new HostLocation(connectPoint, createTime),
            fixedIps,
            annotations.build());
    if (host != null) {
        Set<HostLocation> locations = host.locations().stream()
                .filter(l -> l.deviceId().equals(connectPoint.deviceId()))
                .filter(l -> l.port().equals(connectPoint.port()))
                .collect(Collectors.toSet());
        // newly added location is not in the existing location list,
        // therefore, we simply add this into the location list
        if (locations.isEmpty()) {
            hostProviderService.addLocationToHost(hostId,
                    new HostLocation(connectPoint, createTime));
        }
        // newly added location is in the existing location list,
        // the hostDetected method invocation in turn triggers host Update event
        if (locations.size() == 1) {
            hostProviderService.hostDetected(hostId, hostDesc, false);
        }
    } else {
        hostProviderService.hostDetected(hostId, hostDesc, false);
    }
}
@Test public void testProcessPortAddedForMigration() { org.onosproject.net.Port port = new DefaultPort(DEV1, P2, true, ANNOTATIONS); target.processPortAdded(port); HostId hostId = HostId.hostId(HOST_MAC); verifyHostLocationResult(hostId, HOST_LOC12); }
/** True when any of the header, parameter, or cookie sub-configs carries settings. */
private boolean isNotEmptyConfig() {
    return header.isNotEmptyConfig() || parameter.isNotEmptyConfig() || cookie.isNotEmptyConfig();
}
@Test public void testShenyuRequestHeader() { RequestHandle handle = new RequestHandle(); RequestHandle.ShenyuRequestHeader header = handle.new ShenyuRequestHeader( ImmutableMap.of("addKey", "addValue"), ImmutableMap.of("replaceKey", "newKey"), ImmutableMap.of("setKey", "newValue"), Sets.newSet("removeKey") ); assertThat(header.isNotEmptyConfig(), is(true)); assertThat(header.getAddHeaders(), hasEntry("addKey", "addValue")); assertThat(header.getReplaceHeaderKeys(), hasEntry("replaceKey", "newKey")); assertThat(header.getSetHeaders(), hasEntry("setKey", "newValue")); assertThat(header.getRemoveHeaderKeys(), hasItems("removeKey")); RequestHandle.ShenyuRequestHeader header1 = handle.new ShenyuRequestHeader(); header1.setAddHeaders(ImmutableMap.of("addKey", "addValue")); header1.setReplaceHeaderKeys(ImmutableMap.of("replaceKey", "newKey")); header1.setSetHeaders(ImmutableMap.of("setKey", "newValue")); header1.setRemoveHeaderKeys(ImmutableSet.of("removeKey")); assertThat(ImmutableSet.of(header, header1), hasSize(1)); }
/**
 * Returns the sub-list of {@code list} from {@code start} to {@code end};
 * delegates to {@code ListUtil.sub}, which presumably normalizes/clamps
 * out-of-range indices — TODO confirm against ListUtil's contract.
 */
public static <T> List<T> sub(List<T> list, int start, int end) {
    return ListUtil.sub(list, start, end);
}
@Test public void subInput1PositiveNegativeZeroOutput0() { // Arrange final List<Integer> list = new ArrayList<>(); list.add(null); final int start = 1; final int end = -2_147_483_648; final int step = 0; // Act final List<Integer> retval = CollUtil.sub(list, start, end, step); // Assert result final List<Integer> arrayList = new ArrayList<>(); assertEquals(arrayList, retval); }
@Override public int compare(String version1, String version2) { if(ObjectUtil.equal(version1, version2)) { return 0; } if (version1 == null && version2 == null) { return 0; } else if (version1 == null) {// null或""视为最小版本,排在前 return -1; } else if (version2 == null) { return 1; } return CompareUtil.compare(Version.of(version1), Version.of(version2)); }
@Test public void compareEmptyTest() { int compare = VersionComparator.INSTANCE.compare("", "1.12.1"); assertTrue(compare < 0); compare = VersionComparator.INSTANCE.compare("", null); assertTrue(compare > 0); compare = VersionComparator.INSTANCE.compare(null, ""); assertTrue(compare < 0); }
/**
 * Returns a new immutable navigable set with {@code e} removed, built from the
 * underlying persistent collection's {@code minus}; the receiver is untouched.
 */
@Override
public PCollectionsImmutableNavigableSet<E> removed(E e) {
    return new PCollectionsImmutableNavigableSet<>(underlying().minus(e));
}
@Test public void testDelegationOfRemoved() { new PCollectionsTreeSetWrapperDelegationChecker<>() .defineMockConfigurationForFunctionInvocation(mock -> mock.minus(eq(10)), SINGLETON_SET) .defineWrapperFunctionInvocationAndMockReturnValueTransformation(wrapper -> wrapper.removed(10), identity()) .expectWrapperToWrapMockFunctionReturnValue() .doFunctionDelegationCheck(); }
// Lists all active connectors. Without the "expand" query parameter, returns just the
// connector names. With "expand=status" and/or "expand=info", returns a map from connector
// name to the requested expansions; unknown expansion types are logged and ignored, and a
// connector removed concurrently (NotFoundException) is silently dropped from the result.
@GET @Operation(summary = "List all active connectors") public Response listConnectors( final @Context UriInfo uriInfo, final @Context HttpHeaders headers ) { if (uriInfo.getQueryParameters().containsKey("expand")) { Map<String, Map<String, Object>> out = new HashMap<>(); for (String connector : herder.connectors()) { try { Map<String, Object> connectorExpansions = new HashMap<>(); for (String expansion : uriInfo.getQueryParameters().get("expand")) { switch (expansion) { case "status": connectorExpansions.put("status", herder.connectorStatus(connector)); break; case "info": connectorExpansions.put("info", herder.connectorInfo(connector)); break; default: log.info("Ignoring unknown expansion type {}", expansion); } } out.put(connector, connectorExpansions); } catch (NotFoundException e) { // this likely means that a connector has been removed while we look its info up // we can just not include this connector in the return entity log.debug("Unable to get connector info for {} on this worker", connector); } } return Response.ok(out).build(); } else { return Response.ok(herder.connectors()).build(); } }
// Verifies that a connector whose status lookup throws NotFoundException (removed
// concurrently) is excluded from the expanded listing instead of failing the request.
@Test public void testExpandConnectorsWithConnectorNotFound() { when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorStateInfo connector2 = mock(ConnectorStateInfo.class); when(herder.connectorStatus(CONNECTOR2_NAME)).thenReturn(connector2); doThrow(mock(NotFoundException.class)).when(herder).connectorStatus(CONNECTOR_NAME); forward = mock(UriInfo.class); MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<>(); queryParams.putSingle("expand", "status"); when(forward.getQueryParameters()).thenReturn(queryParams); Map<String, Map<String, Object>> expanded = (Map<String, Map<String, Object>>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets assertEquals(Collections.singleton(CONNECTOR2_NAME), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); }
/**
 * Returns the URL path to the analytics JS file, prefixed with "/", or empty when no
 * analytics plugin is installed or its configured path is rejected.
 */
@Override
public Optional<String> getUrlPathToJs() {
    // No analytics plugin installed -> nothing to serve.
    if (analytics == null) {
        return Optional.empty();
    }
    final String path = analytics.getUrlPathToJs();
    if (path == null) {
        return Optional.empty();
    }
    // Reject absolute paths, parent-directory traversal and full URLs.
    if (path.startsWith("/") || path.contains("..") || path.contains("://")) {
        return Optional.empty();
    }
    return Optional.of("/" + path);
}
// A valid relative path from an installed analytics plugin is returned with a leading "/".
@Test public void return_js_path_if_analytics_plugin_is_installed() { WebAnalytics analytics = newWebAnalytics("api/google/analytics"); WebAnalyticsLoaderImpl underTest = new WebAnalyticsLoaderImpl(new WebAnalytics[] {analytics}); assertThat(underTest.getUrlPathToJs()).hasValue("/api/google/analytics"); }
/**
 * Snapshots enumerator state for the given checkpoint: the checkpoint simply captures
 * all splits the assigner has not yet handed out.
 */
@Override public PendingSplitsCheckpoint<FileSourceSplit> snapshotState(long checkpointId) { return PendingSplitsCheckpoint.fromCollectionSnapshot(splitAssigner.remainingSplits()); }
// With no reader having requested a split yet, a checkpoint must still contain the split.
@Test void testCheckpointNoSplitRequested() throws Exception { final TestingSplitEnumeratorContext<FileSourceSplit> context = new TestingSplitEnumeratorContext<>(4); final FileSourceSplit split = createRandomSplit(); final StaticFileSplitEnumerator enumerator = createEnumerator(context, split); final PendingSplitsCheckpoint<FileSourceSplit> checkpoint = enumerator.snapshotState(1L); assertThat(checkpoint.getSplits()).contains(split); }
// Finds the single public method annotated with @ScalarFn.ApplyMethod on the given ScalarFn
// class (searching up to, but excluding, ScalarFn itself). Throws IllegalArgumentException
// when no annotated method exists, when multiple distinct annotated methods are found
// (overrides of the same name/parameter types from parent classes are tolerated), or when
// the method is not public.
public static Method getApplyMethod(ScalarFn scalarFn) { Class<? extends ScalarFn> clazz = scalarFn.getClass(); Collection<Method> matches = ReflectHelpers.declaredMethodsWithAnnotation( ScalarFn.ApplyMethod.class, clazz, ScalarFn.class); if (matches.isEmpty()) { throw new IllegalArgumentException( String.format( "No method annotated with @%s found in class %s.", ScalarFn.ApplyMethod.class.getSimpleName(), clazz.getName())); } // If we have at least one match, then either it should be the only match // or it should be an extension of the other matches (which came from parent // classes). Method first = matches.iterator().next(); for (Method other : matches) { if (!first.getName().equals(other.getName()) || !Arrays.equals(first.getParameterTypes(), other.getParameterTypes())) { throw new IllegalArgumentException( String.format( "Found multiple methods annotated with @%s. [%s] and [%s]", ScalarFn.ApplyMethod.class.getSimpleName(), ReflectHelpers.formatMethod(first), ReflectHelpers.formatMethod(other))); } } // Method must be public. if ((first.getModifiers() & Modifier.PUBLIC) == 0) { throw new IllegalArgumentException( String.format("Method %s is not public.", ReflectHelpers.formatMethod(first))); } return first; }
// A static @ApplyMethod is resolved and invocable with a null receiver.
@Test @SuppressWarnings("nullness") // If result is null, test will fail as expected. public void testGetApplyMethodStatic() throws InvocationTargetException, IllegalAccessException { Method method = ScalarFnReflector.getApplyMethod(new IncrementFnWithStaticMethod()); @Nullable Object result = method.invoke(null, Long.valueOf(24L)); assertEquals(Long.valueOf(25L), result); }
// Fetches the next batch of messages starting at the given offset. When Kinesis throttles
// the request (ProvisionedThroughputExceededException), logs the event and returns an empty
// batch positioned at the same start offset so the caller can retry later.
@Override public synchronized KinesisMessageBatch fetchMessages(StreamPartitionMsgOffset startMsgOffset, int timeoutMs) { try { return getKinesisMessageBatch((KinesisPartitionGroupOffset) startMsgOffset); } catch (ProvisionedThroughputExceededException pte) { LOGGER.error("Rate limit exceeded while fetching messages from Kinesis stream: {} with threshold: {}", pte.getMessage(), _config.getRpsLimit()); return new KinesisMessageBatch(List.of(), (KinesisPartitionGroupOffset) startMsgOffset, false); } }
// Two consecutive fetches return the mocked records; the shard iterator is requested once
// (reused via nextShardIterator) while getRecords is called once per fetch.
@Test public void testBasicConsumer() { KinesisClient kinesisClient = mock(KinesisClient.class); when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn( GetShardIteratorResponse.builder().shardIterator(PLACEHOLDER).build()); when(kinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn( GetRecordsResponse.builder().nextShardIterator(PLACEHOLDER).records(_records).build()); KinesisConsumer kinesisConsumer = new KinesisConsumer(_kinesisConfig, kinesisClient); // Fetch first batch KinesisPartitionGroupOffset startOffset = new KinesisPartitionGroupOffset("0", "1"); KinesisMessageBatch kinesisMessageBatch = kinesisConsumer.fetchMessages(startOffset, TIMEOUT); assertEquals(kinesisMessageBatch.getMessageCount(), NUM_RECORDS); for (int i = 0; i < NUM_RECORDS; i++) { assertEquals(baToString(kinesisMessageBatch.getStreamMessage(i).getValue()), DUMMY_RECORD_PREFIX + i); } assertFalse(kinesisMessageBatch.isEndOfPartitionGroup()); // Fetch second batch kinesisMessageBatch = kinesisConsumer.fetchMessages(kinesisMessageBatch.getOffsetOfNextBatch(), TIMEOUT); assertEquals(kinesisMessageBatch.getMessageCount(), NUM_RECORDS); for (int i = 0; i < NUM_RECORDS; i++) { assertEquals(baToString(kinesisMessageBatch.getStreamMessage(i).getValue()), DUMMY_RECORD_PREFIX + i); } assertFalse(kinesisMessageBatch.isEndOfPartitionGroup()); // Expect only 1 call to get shard iterator and 2 calls to get records verify(kinesisClient, times(1)).getShardIterator(any(GetShardIteratorRequest.class)); verify(kinesisClient, times(2)).getRecords(any(GetRecordsRequest.class)); }
/**
 * Returns whether {@code toEvaluate} lies strictly inside the open interval
 * (leftMargin, rightMargin); a {@code null} margin means that side is unbounded.
 */
boolean isInsideOpenOpen(Number toEvaluate) {
    final double value = toEvaluate.doubleValue();
    if (leftMargin != null && rightMargin != null) {
        // Bounded on both sides: strictly between the margins.
        return value > leftMargin.doubleValue() && value < rightMargin.doubleValue();
    }
    if (leftMargin == null) {
        // Unbounded on the left: only the right margin constrains the value.
        return value < rightMargin.doubleValue();
    }
    // Unbounded on the right: only the left margin constrains the value.
    return value > leftMargin.doubleValue();
}
// Open-open interval membership: margins themselves are excluded; null margins are unbounded.
@Test void isInsideOpenOpen() { KiePMMLInterval kiePMMLInterval = new KiePMMLInterval(null, 20, CLOSURE.OPEN_OPEN); assertThat(kiePMMLInterval.isInsideOpenOpen(10)).isTrue(); assertThat(kiePMMLInterval.isInsideOpenOpen(20)).isFalse(); assertThat(kiePMMLInterval.isInsideOpenOpen(30)).isFalse(); kiePMMLInterval = new KiePMMLInterval(20, null, CLOSURE.OPEN_OPEN); assertThat(kiePMMLInterval.isInsideOpenOpen(30)).isTrue(); assertThat(kiePMMLInterval.isInsideOpenOpen(20)).isFalse(); assertThat(kiePMMLInterval.isInsideOpenOpen(10)).isFalse(); kiePMMLInterval = new KiePMMLInterval(20, 40, CLOSURE.OPEN_OPEN); assertThat(kiePMMLInterval.isInsideOpenOpen(30)).isTrue(); assertThat(kiePMMLInterval.isInsideOpenOpen(10)).isFalse(); assertThat(kiePMMLInterval.isInsideOpenOpen(20)).isFalse(); assertThat(kiePMMLInterval.isInsideOpenOpen(40)).isFalse(); assertThat(kiePMMLInterval.isInsideOpenOpen(50)).isFalse(); }
/**
 * @deprecated kept for backward compatibility; delegates to the Bowl-aware overload using
 * {@code DefaultBowl.getInstance()}.
 */
@Deprecated public static String[] createFilePathList( VariableSpace space, String[] fileName, String[] fileMask, String[] excludeFileMask, String[] fileRequired ) { return createFilePathList( DefaultBowl.getInstance(), space, fileName, fileMask, excludeFileMask, fileRequired ); }
// A file whose name contains spaces must still be resolved by createFilePathList.
@Test public void testSpecialCharsInFileNamesDefaultBehavior() throws IOException, KettleException { String fileNameWithSpaces = "file name with spaces"; tempFolder.newFile( fileNameWithSpaces ); VariableSpace spaceMock = mock( VariableSpace.class ); when( spaceMock.environmentSubstitute( any( String[].class ) ) ).thenAnswer( (Answer<String[]>) invocationOnMock -> (String[]) invocationOnMock.getArguments()[ 0 ] ); String[] folderNameList = { tempFolder.getRoot().getPath() }; String[] emptyStringArray = { "" }; boolean[] fileRequiredList = { true }; String[] paths = FileInputList .createFilePathList( spaceMock, folderNameList, emptyStringArray, emptyStringArray, emptyStringArray, fileRequiredList ); assertTrue( "File with spaces not found", paths[ 0 ].endsWith( fileNameWithSpaces ) ); }
// Compares two class-name -> signature maps: matching keys are compared class-by-class via
// matchingClasses, while missing/extra classes are reported to the log. Returns true only
// when the maps fully agree.
static boolean compareSignatures(Map<String, JavaClassSignature> expected, Map<String, JavaClassSignature> actual, Log log) { return SetMatcher.compare(expected.keySet(), actual.keySet(), item -> matchingClasses(item, expected.get(item), actual.get(item), log), item -> log.error(String.format("Missing class: %s", item)), item -> log.error(String.format("Extra class: %s", item))); }
// Exercises all mismatch reports at once: missing/extra classes plus every per-class
// difference (superclass, interface, attribute, method, field) for a class present in both.
@Test public void testCompareSignatures() { Log log = mock(Log.class); JavaClassSignature signatureA = new JavaClassSignature( "java.lang.Object", Set.of(), List.of("public"), Set.of("public void foo()"), Set.of("public int bar")); JavaClassSignature signatureB = new JavaClassSignature( "java.lang.Exception", Set.of("java.lang.Runnable"), List.of("protected"), Set.of("public void foo(int)"), Set.of("public boolean bar")); Map<String, JavaClassSignature> expected = ImmutableMap.<String, JavaClassSignature>builder() .put("test.Missing", signatureA) .put("test.A", signatureA) .put("test.B", signatureB) .build(); Map<String, JavaClassSignature> actual = ImmutableMap.<String, JavaClassSignature>builder() .put("test.A", signatureA) .put("test.Extra", signatureA) .put("test.B", signatureA) .build(); assertThat(AbiCheck.compareSignatures(expected, actual, log), equalTo(false)); verify(log).error("Missing class: test.Missing"); verify(log).error("Extra class: test.Extra"); verify(log) .error("Class test.B: Expected superclass java.lang.Exception, found java.lang.Object"); verify(log).error("Class test.B: Missing interface java.lang.Runnable"); verify(log).error("Class test.B: Missing attribute protected"); verify(log).error("Class test.B: Extra attribute public"); verify(log).error("Class test.B: Missing method public void foo(int)"); verify(log).error("Class test.B: Extra method public void foo()"); verify(log).error("Class test.B: Missing field public boolean bar"); verify(log).error("Class test.B: Extra field public int bar"); }
/**
 * Builds the dialect-specific DDL statements that drop the configured columns.
 * PostgreSQL, MSSQL and Oracle drop all columns in a single statement; H2 needs
 * one statement per column.
 *
 * @throws IllegalStateException for an unsupported dialect
 */
public List<String> build() {
    return switch (dialect.getId()) {
        case PostgreSql.ID -> {
            StringBuilder sql = new StringBuilder().append(ALTER_TABLE).append(tableName).append(" ");
            dropColumns(sql, "DROP COLUMN ", columns);
            yield Collections.singletonList(sql.toString());
        }
        case MsSql.ID -> Collections.singletonList(getMsSQLStatement(columns));
        case Oracle.ID -> Collections.singletonList(getOracleStatement());
        // H2 cannot drop several columns in one ALTER TABLE: emit one statement per column.
        case H2.ID -> Arrays.stream(columns).map(this::getMsSQLStatement).toList();
        default -> throw new IllegalStateException(String.format("Unsupported database '%s'", dialect.getId()));
    };
}
// H2 produces one ALTER TABLE ... DROP COLUMN statement per column.
@Test public void drop_columns_on_h2() { assertThat(new DropColumnsBuilder(new H2(), "issues", "date_in_ms", "name").build()) .containsOnly( "ALTER TABLE issues DROP COLUMN date_in_ms", "ALTER TABLE issues DROP COLUMN name"); }
// Computes the initial offset range for a Kafka partition. The start offset comes from, in
// priority order: an explicit start offset, a start read time (resolved via the consumer),
// or the consumer's current position; the end offset likewise from an explicit stop offset,
// a stop read time, or Long.MAX_VALUE (unbounded). Also reports the source to Lineage.
// NOTE(review): the bare `new OffsetRange(startOffset, endOffset);` discards its result —
// presumably it forces range validation before the Lineage side effect; confirm, otherwise
// it is dead code.
@GetInitialRestriction public OffsetRange initialRestriction(@Element KafkaSourceDescriptor kafkaSourceDescriptor) { Map<String, Object> updatedConsumerConfig = overrideBootstrapServersConfig(consumerConfig, kafkaSourceDescriptor); TopicPartition partition = kafkaSourceDescriptor.getTopicPartition(); LOG.info("Creating Kafka consumer for initial restriction for {}", partition); try (Consumer<byte[], byte[]> offsetConsumer = consumerFactoryFn.apply(updatedConsumerConfig)) { ConsumerSpEL.evaluateAssign(offsetConsumer, ImmutableList.of(partition)); long startOffset; @Nullable Instant startReadTime = kafkaSourceDescriptor.getStartReadTime(); if (kafkaSourceDescriptor.getStartReadOffset() != null) { startOffset = kafkaSourceDescriptor.getStartReadOffset(); } else if (startReadTime != null) { startOffset = ConsumerSpEL.offsetForTime(offsetConsumer, partition, startReadTime); } else { startOffset = offsetConsumer.position(partition); } long endOffset = Long.MAX_VALUE; @Nullable Instant stopReadTime = kafkaSourceDescriptor.getStopReadTime(); if (kafkaSourceDescriptor.getStopReadOffset() != null) { endOffset = kafkaSourceDescriptor.getStopReadOffset(); } else if (stopReadTime != null) { endOffset = ConsumerSpEL.offsetForTime(offsetConsumer, partition, stopReadTime); } new OffsetRange(startOffset, endOffset); Lineage.getSources() .add( "kafka", ImmutableList.of( (String) updatedConsumerConfig.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG), MoreObjects.firstNonNull(kafkaSourceDescriptor.getTopic(), partition.topic()))); return new OffsetRange(startOffset, endOffset); } }
// When only a start read time is given, the start offset is resolved from that timestamp
// (not the consumer position) and the range is unbounded on the right.
@Test public void testInitialRestrictionWhenHasStartTime() throws Exception { long expectedStartOffset = 10L; Instant startReadTime = Instant.now(); consumer.setStartOffsetForTime(expectedStartOffset, startReadTime); consumer.setCurrentPos(5L); OffsetRange result = dofnInstance.initialRestriction( KafkaSourceDescriptor.of( topicPartition, null, startReadTime, null, null, ImmutableList.of())); assertEquals(new OffsetRange(expectedStartOffset, Long.MAX_VALUE), result); }
/**
 * Returns an unmodifiable view of the map from classic protocol name to the number of
 * members supporting it. The view reflects later changes to the underlying map.
 */
public Map<String, Integer> classicMembersSupportedProtocols() { return Collections.unmodifiableMap(classicProtocolMembersSupportedProtocols); }
// Verifies the per-protocol member counts track member updates (add, change, re-add of
// supported protocols) and that supportsClassicProtocols requires all members to share at
// least one of the offered protocol names.
@Test public void testClassicMembersSupportedProtocols() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); List<ConsumerGroupMemberMetadataValue.ClassicProtocol> rangeProtocol = new ArrayList<>(); rangeProtocol.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(new byte[0])); List<ConsumerGroupMemberMetadataValue.ClassicProtocol> roundRobinAndRangeProtocols = new ArrayList<>(); roundRobinAndRangeProtocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("roundrobin") .setMetadata(new byte[0])); roundRobinAndRangeProtocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(new byte[0])); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member-1") .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(rangeProtocol)) .build(); consumerGroup.updateMember(member1); ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member-2") .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(roundRobinAndRangeProtocols)) .build(); consumerGroup.updateMember(member2); assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range")); assertEquals(1, consumerGroup.classicMembersSupportedProtocols().get("roundrobin")); assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, new HashSet<>(Arrays.asList("range", "sticky")))); assertFalse(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, new HashSet<>(Arrays.asList("sticky", "roundrobin")))); member2 = new ConsumerGroupMember.Builder(member2) .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(rangeProtocol)) .build(); consumerGroup.updateMember(member2); assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range")); 
assertFalse(consumerGroup.classicMembersSupportedProtocols().containsKey("roundrobin")); member1 = new ConsumerGroupMember.Builder(member1) .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(roundRobinAndRangeProtocols)) .build(); consumerGroup.updateMember(member1); member2 = new ConsumerGroupMember.Builder(member2) .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(roundRobinAndRangeProtocols)) .build(); consumerGroup.updateMember(member2); assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range")); assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("roundrobin")); assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, new HashSet<>(Arrays.asList("sticky", "roundrobin")))); }
/**
 * Creates the command that registers the custom type declared by {@code statement}.
 *
 * @param statement the {@code REGISTER TYPE} statement to execute
 * @return the command carrying the resolved SQL type and its name
 * @throws KsqlException when the name is already registered and {@code IF NOT EXISTS}
 *     was not specified
 */
public RegisterTypeCommand create(final RegisterType statement) {
    final String name = statement.getName();
    final boolean ifNotExists = statement.getIfNotExists();
    final SqlType type = statement.getType().getSqlType();
    if (!ifNotExists) {
        // Resolve the existing registration once (the original resolved it twice:
        // once for the presence check and once more for the error message).
        final Object existingType = metaStore.resolveType(name).orElse(null);
        if (existingType != null) {
            throw new KsqlException(
                "Cannot register custom type '" + name + "' "
                    + "since it is already registered with type: " + existingType
            );
        }
    }
    return new RegisterTypeCommand(type, name);
}
// Registering an already-registered type name without IF NOT EXISTS must throw a
// KsqlException naming the existing registration.
@Test public void shouldThrowOnRegisterExistingTypeWhenIfNotExistsNotSet() { // Given: final RegisterType ddlStatement = new RegisterType( Optional.empty(), EXISTING_TYPE, new Type(SqlStruct.builder().field("foo", SqlPrimitiveType.of(SqlBaseType.STRING)).build()), false ); // When: final Exception e = assertThrows( KsqlException.class, () -> factory.create(ddlStatement) ); // Then: assertThat( e.getMessage(), equalTo("Cannot register custom type '" + EXISTING_TYPE + "' since it is already registered with type: " + customType) ); }
/**
 * Checks whether the given URL is reachable using the default timeout; delegates to
 * {@code checkUrl(url, DEFAULT_TIMEOUT)}.
 */
public static boolean checkUrl(final String url) { return checkUrl(url, DEFAULT_TIMEOUT); }
// A blank URL is never considered reachable.
@Test public void testBlank() { assertFalse(UpstreamCheckUtils.checkUrl("")); }
// Creates a MetricsEndpoint for the given URI. The remaining part of the URI encodes the
// metrics type and metric name; endpoint options are bound from the query parameters.
@Override protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception { String metricsName = getMetricsName(remaining); MetricsType metricsType = getMetricsType(remaining); LOG.debug("Metrics type: {}; name: {}", metricsType, metricsName); Endpoint endpoint = new MetricsEndpoint(uri, this, metricRegistry, metricsType, metricsName); setProperties(endpoint, parameters); return endpoint; }
// Endpoint creation resolves the MetricRegistry from the Camel registry, parses the
// metric name from the URI remainder, and binds the "mark" query parameter.
@Test public void testCreateEndpoint() throws Exception { component.setCamelContext(camelContext); when(camelContext.getRegistry()).thenReturn(camelRegistry); when(camelContext.resolvePropertyPlaceholders(anyString())).then(returnsFirstArg()); when(camelRegistry.lookupByNameAndType(MetricsComponent.METRIC_REGISTRY_NAME, MetricRegistry.class)) .thenReturn(metricRegistry); when(camelContext.getCamelContextExtension()).thenReturn(ecc); when(PluginHelper.getBeanIntrospection(ecc)).thenReturn(new DefaultBeanIntrospection()); when(PluginHelper.getConfigurerResolver(ecc)).thenReturn((name, context) -> null); Map<String, Object> params = new HashMap<>(); Long value = System.currentTimeMillis(); params.put("mark", value); component.init(); Endpoint result = component.createEndpoint("metrics:meter:long.meter", "meter:long.meter", params); assertThat(result, is(notNullValue())); assertThat(result, is(instanceOf(MetricsEndpoint.class))); MetricsEndpoint me = (MetricsEndpoint) result; assertThat(me.getMark(), is(value)); assertThat(me.getMetricsName(), is("long.meter")); assertThat(me.getRegistry(), is(metricRegistry)); inOrder.verify(camelContext, times(1)).getRegistry(); inOrder.verify(camelRegistry, times(1)).lookupByNameAndType(MetricsComponent.METRIC_REGISTRY_NAME, MetricRegistry.class); inOrder.verify(camelContext, times(1)).getTypeConverter(); inOrder.verifyNoMoreInteractions(); }
/**
 * Decodes an ABI-encoded function result into typed values according to the given output
 * parameter definitions; delegates to the shared decoder instance.
 */
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) { return decoder.decodeFunctionResult(rawInput, outputParameters); }
// Decodes a hand-built ABI blob mixing dynamic and static nested structs (Nar/Nuu/Foo/
// Bar/Fuzz) and checks the fully reconstructed value list.
@Test public void testDecodeMultipleDynamicStaticNestedStructs() { String rawInput = "0000000000000000000000000000000000000000000000000000000000000240" + "000000000000000000000000000000000000000000000000000000000000007b" + "000000000000000000000000000000000000000000000000000000000000007b" + "000000000000000000000000000000000000000000000000000000000000007b" + "000000000000000000000000000000000000000000000000000000000000007b" + "00000000000000000000000000000000000000000000000000000000000004d1" + "0000000000000000000000000000000000000000000000000000000000000079" + "0000000000000000000000000000000000000000000000000000000000000002" + "0000000000000000000000000000000000000000000000000000000000000340" + "0000000000000000000000000000000000000000000000000000000000000400" + "00000000000000000000000000000000000000000000000000000000000004d1" + "0000000000000000000000000000000000000000000000000000000000000079" + "0000000000000000000000000000000000000000000000000000000000000002" + "0000000000000000000000000000000000000000000000000000000000000500" + "00000000000000000000000000000000000000000000000000000000000005a0" + "00000000000000000000000000000000000000000000000000000000000004d1" + "0000000000000000000000000000000000000000000000000000000000000079" + "0000000000000000000000000000000000000000000000000000000000000002" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000001" + "3400000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000009" + "6e6573746564466f6f0000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000040" + 
"0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000004" + "6861686100000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000004" + "686f686f00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000001" + "3400000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000009" + "6e6573746564466f6f0000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000060" + "0000000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000004" + "6861686100000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000004" + "686f686f00000000000000000000000000000000000000000000000000000000"; assertEquals( FunctionReturnDecoder.decode( rawInput, AbiV2TestFixture.getNarBarBarFuzzFooNarFuzzNuuFooFuzzFunction .getOutputParameters()), Arrays.asList( new AbiV2TestFixture.Nar( new AbiV2TestFixture.Nuu( new AbiV2TestFixture.Foo("4", "nestedFoo"))), new AbiV2TestFixture.Bar(BigInteger.valueOf(123), BigInteger.valueOf(123)), new 
AbiV2TestFixture.Bar(BigInteger.valueOf(123), BigInteger.valueOf(123)), new AbiV2TestFixture.Fuzz( new AbiV2TestFixture.Bar( BigInteger.valueOf(1233), BigInteger.valueOf(121)), BigInteger.valueOf(2)), new AbiV2TestFixture.Foo("haha", "hoho"), new AbiV2TestFixture.Nar( new AbiV2TestFixture.Nuu( new AbiV2TestFixture.Foo("4", "nestedFoo"))), new AbiV2TestFixture.Fuzz( new AbiV2TestFixture.Bar( BigInteger.valueOf(1233), BigInteger.valueOf(121)), BigInteger.valueOf(2)), new AbiV2TestFixture.Nuu(new AbiV2TestFixture.Foo("", "")), new AbiV2TestFixture.Foo("haha", "hoho"), new AbiV2TestFixture.Fuzz( new AbiV2TestFixture.Bar( BigInteger.valueOf(1233), BigInteger.valueOf(121)), BigInteger.valueOf(2)))); }
/**
 * Formats a Redis command and its parameters for logging. For AUTH the parameters are
 * masked so the password never reaches the logs.
 */
public static String toString(RedisCommand<?> command, Object... params) {
    final String paramsText = RedisCommands.AUTH.equals(command)
            ? "(password masked)"
            : LogHelper.toString(params);
    return "command: " + command + ", params: " + paramsText;
}
// Single-element arrays of every primitive type and String are rendered as "[value]".
@Test public void toStringWithSmallArrays() { String[] strings = new String[] { "0" }; int[] ints = new int[] { 1 }; long[] longs = new long[] { 2L }; double[] doubles = new double[] { 3.1D }; float[] floats = new float[] { 4.2F }; byte[] bytes = new byte[] { (byte) 5 }; char[] chars = new char[] { '6' }; assertThat(LogHelper.toString(strings)).isEqualTo("[0]"); assertThat(LogHelper.toString(ints)).isEqualTo("[1]"); assertThat(LogHelper.toString(longs)).isEqualTo("[2]"); assertThat(LogHelper.toString(doubles)).isEqualTo("[3.1]"); assertThat(LogHelper.toString(floats)).isEqualTo("[4.2]"); assertThat(LogHelper.toString(bytes)).isEqualTo("[5]"); assertThat(LogHelper.toString(chars)).isEqualTo("[6]"); }
// Parses a single migration SQL statement: validates the statement type is supported,
// substitutes variables into the parsed form, re-parses the substituted text, and returns
// a ParsedCommand (the AST is built only for non-STATEMENT types). Parse failures are
// wrapped in MigrationException with the offending SQL.
public static ParsedCommand parse( // CHECKSTYLE_RULES.ON: CyclomaticComplexity final String sql, final Map<String, String> variables) { validateSupportedStatementType(sql); final String substituted; try { substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables); } catch (ParseFailedException e) { throw new MigrationException(String.format( "Failed to parse the statement. Statement: %s. Reason: %s", sql, e.getMessage())); } final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted) .get(0).getStatement(); final boolean isStatement = StatementType.get(statementContext.statement().getClass()) == StatementType.STATEMENT; return new ParsedCommand(substituted, isStatement ? Optional.empty() : Optional.of(new AstBuilder(TypeRegistry.EMPTY) .buildStatement(statementContext))); }
// SELECT is not a supported migration statement type and must be rejected up front.
@Test public void shouldThrowOnSelectStatement() { // When: final MigrationException e = assertThrows(MigrationException.class, () -> parse("select * from my_stream emit changes;")); // Then: assertThat(e.getMessage(), is("'SELECT' statements are not supported.")); }
/**
 * Returns the fully qualified names of the given classes; varargs convenience overload
 * delegating to the collection-based variant.
 */
@PublicAPI(usage = ACCESS) public static List<String> formatNamesOf(Class<?>... paramTypes) { return formatNamesOf(copyOf(paramTypes)); }
// Both the varargs and collection overloads return fully qualified names; empty input
// yields an empty list.
@Test public void formatNamesOf() { assertThat(Formatters.formatNamesOf()).isEmpty(); assertThat(Formatters.formatNamesOf(List.class, Iterable.class, String.class)) .containsExactly(List.class.getName(), Iterable.class.getName(), String.class.getName()); assertThat(Formatters.formatNamesOf(emptyList())).isEmpty(); assertThat(Formatters.formatNamesOf(of(List.class, Iterable.class, String.class))) .containsExactly(List.class.getName(), Iterable.class.getName(), String.class.getName()); // special case because the inferred type of the list is List<Class<? extends Serializable>> assertThat(Formatters.formatNamesOf(of(String.class, Serializable.class))) .containsExactly(String.class.getName(), Serializable.class.getName()); }
public static byte[] getBytesWithoutClosing(InputStream stream) throws IOException { if (stream instanceof ExposedByteArrayInputStream) { // Fast path for the exposed version. return ((ExposedByteArrayInputStream) stream).readAll(); } else if (stream instanceof ByteArrayInputStream) { // Fast path for ByteArrayInputStream. byte[] ret = new byte[stream.available()]; stream.read(ret); return ret; } // Falls back to normal stream copying. SoftReference<byte[]> refBuffer = threadLocalBuffer.get(); byte[] buffer = refBuffer == null ? null : refBuffer.get(); if (buffer == null) { buffer = new byte[BUF_SIZE]; threadLocalBuffer.set(new SoftReference<>(buffer)); } ByteArrayOutputStream outStream = new ByteArrayOutputStream(); while (true) { int r = stream.read(buffer); if (r == -1) { break; } outStream.write(buffer, 0, r); } return outStream.toByteArray(); }
// The ByteArrayInputStream fast path returns all bytes and fully consumes the stream.
@Test public void testGetBytesFromByteArrayInputStream() throws IOException { InputStream stream = new ByteArrayInputStream(testData); byte[] bytes = StreamUtils.getBytesWithoutClosing(stream); assertArrayEquals(testData, bytes); assertEquals(0, stream.available()); }
/**
 * Returns the Hive type of each column in the configured query/table; delegates to the
 * shared metadata lookup with the TypeInfo translator.
 */
@Override public List<TypeInfo> getColumnTypes(Configuration conf) throws HiveJdbcDatabaseAccessException { return getColumnMetadata(conf, typeInfoTranslator); }
// A star query over a table covering every JDBC type maps each column to the expected
// Hive TypeInfo (unsupported JDBC types map to unknownTypeInfo).
@Test public void testGetColumnTypes_starQuery_allTypes() throws HiveJdbcDatabaseAccessException { Configuration conf = buildConfiguration(); conf.set(JdbcStorageConfig.QUERY.getPropertyName(), "select * from all_types_table"); DatabaseAccessor accessor = DatabaseAccessorFactory.getAccessor(conf); List<TypeInfo> expectedTypes = new ArrayList<>(); expectedTypes.add(TypeInfoFactory.getCharTypeInfo(1)); expectedTypes.add(TypeInfoFactory.getCharTypeInfo(20)); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.varcharTypeInfo); expectedTypes.add(TypeInfoFactory.getVarcharTypeInfo(1024)); expectedTypes.add(TypeInfoFactory.varcharTypeInfo); expectedTypes.add(TypeInfoFactory.booleanTypeInfo); expectedTypes.add(TypeInfoFactory.byteTypeInfo); expectedTypes.add(TypeInfoFactory.shortTypeInfo); expectedTypes.add(TypeInfoFactory.intTypeInfo); expectedTypes.add(TypeInfoFactory.longTypeInfo); expectedTypes.add(TypeInfoFactory.getDecimalTypeInfo(38, 0)); expectedTypes.add(TypeInfoFactory.getDecimalTypeInfo(9, 3)); expectedTypes.add(TypeInfoFactory.floatTypeInfo); expectedTypes.add(TypeInfoFactory.doubleTypeInfo); expectedTypes.add(TypeInfoFactory.getDecimalTypeInfo(38, 0)); expectedTypes.add(TypeInfoFactory.dateTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.timestampTypeInfo); expectedTypes.add(TypeInfoFactory.timestampTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); 
expectedTypes.add(TypeInfoFactory.getListTypeInfo(TypeInfoFactory.unknownTypeInfo)); expectedTypes.add(TypeInfoFactory.unknownTypeInfo); Assert.assertEquals(expectedTypes, accessor.getColumnTypes(conf)); }
/**
 * Metric probe: number of park (wait-set) queues currently tracked, i.e. the size of the
 * wait-set map.
 */
@Probe(name = OPERATION_METRIC_PARKER_PARK_QUEUE_COUNT) public int getParkQueueCount() { return waitSetMap.size(); }
// While several threads lock/wait/unlock concurrently, the park-queue count sampled at any
// moment must stay below the number of blocked threads.
@Test public void testAwaitQueueCount_shouldNotExceedBlockedThreadCount() { final HazelcastInstance hz = createHazelcastInstance(); NodeEngineImpl nodeEngine = getNode(hz).nodeEngine; OperationParkerImpl waitNotifyService = (OperationParkerImpl) nodeEngine.getOperationParker(); final int keyCount = 1000; int nThreads = 4; CountDownLatch latch = new CountDownLatch(nThreads); for (int i = 0; i < nThreads; i++) { new Thread(new LockWaitAndUnlockTask(hz, keyCount, latch)).start(); } while (latch.getCount() > 0) { LockSupport.parkNanos(1); int awaitQueueCount = waitNotifyService.getParkQueueCount(); Assert.assertTrue( "Await queue count should be smaller than total number of threads: " + awaitQueueCount + " VS " + nThreads, awaitQueueCount < nThreads); } }
/**
 * Builds a BigQuery-backed Beam SQL table for the given table definition,
 * deriving conversion options from the table's properties.
 */
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
    return new BigQueryTable(table, getConversionOptions(table.getProperties()));
}
/**
 * Explicitly configuring the DEFAULT read method in the table properties
 * must yield a table that uses {@code Method.DEFAULT}.
 */
@Test
public void testSelectDefaultMethodExplicitly() {
    String properties =
        "{" + METHOD_PROPERTY + ": " + "\"" + Method.DEFAULT.toString() + "\" }";
    Table table = fakeTableWithProperties("hello", properties);
    BigQueryTable sqlTable = (BigQueryTable) provider.buildBeamSqlTable(table);
    assertEquals(Method.DEFAULT, sqlTable.method);
}
/**
 * Static factory that wraps the given elements in a {@code TableElements}
 * backed by an immutable copy, preserving declaration order.
 *
 * @param elements the table elements, in declaration order
 * @return a new {@code TableElements} instance
 */
public static TableElements of(final TableElement... elements) {
    return new TableElements(ImmutableList.copyOf(elements));
}
@Test public void shouldSupportKeyColumnsAfterValues() { // Given: final TableElement key = tableElement( "key", STRING_TYPE, KEY_CONSTRAINT); final TableElement value = tableElement("v0", INT_TYPE); final List<TableElement> elements = ImmutableList.of(value, key); // When: final TableElements result = TableElements.of(elements); // Then: assertThat(result, contains(value, key)); }
/**
 * Database-broadcast routing: emits one route unit per configured data
 * source, mapping each logical data source name onto itself with no table
 * mappers.
 */
@Override
public RouteContext route(final ShardingRule shardingRule) {
    RouteContext routeContext = new RouteContext();
    shardingRule.getDataSourceNames().forEach(dataSourceName ->
            routeContext.getRouteUnits().add(
                    new RouteUnit(new RouteMapper(dataSourceName, dataSourceName), Collections.emptyList())));
    return routeContext;
}
/**
 * With two data sources configured, broadcast routing must produce exactly
 * one route unit per data source, in order.
 */
@Test
void assertRoute() {
    ShardingRuleConfiguration ruleConfig = new ShardingRuleConfiguration();
    ruleConfig.getTables().add(new ShardingTableRuleConfiguration("t_order", "ds_${0..1}.t_order_${0..2}"));
    ShardingRule rule = new ShardingRule(ruleConfig,
            Maps.of("ds_0", new MockedDataSource(), "ds_1", new MockedDataSource()),
            mock(ComputeNodeInstanceContext.class));
    RouteContext actual = shardingDatabaseBroadcastRoutingEngine.route(rule);
    List<RouteUnit> routeUnits = new ArrayList<>(actual.getRouteUnits());
    assertThat(actual.getRouteUnits().size(), is(2));
    assertThat(routeUnits.get(0).getDataSourceMapper().getActualName(), is("ds_0"));
    assertThat(routeUnits.get(1).getDataSourceMapper().getActualName(), is("ds_1"));
}
/**
 * Renders the full HTML page: header, then the core report body, then the
 * footer.
 *
 * @throws IOException if writing to the underlying writer fails
 */
@Override
void toHtml() throws IOException {
    writeHtmlHeader();
    htmlCoreReport.toHtml();
    writeHtmlFooter();
}
/**
 * Rendering a report over the full period must write non-empty output.
 */
@Test
public void testSimpleToHtml() throws IOException {
    final HtmlReport report =
        new HtmlReport(collector, null, javaInformationsList, Period.TOUT, writer);
    report.toHtml();
    assertNotEmptyAndClear(writer);
}
/**
 * Accepts a new element: buffers it, then drains any buffered elements that
 * are now safely outside the sorter's lag window. The drain order of the
 * two policies matters and must not be changed.
 */
@Override
public void accept(T t) {
    // Advance the latest-timestamp high-water mark before buffering.
    updateTimeHighWaterMark(t.time());
    shortTermStorage.add(t);
    drainDueToLatestInput(t); //standard drain policy
    drainDueToTimeHighWaterMark(); //prevent blow-up when data goes backwards in time
    // Track the largest buffer size reached, for diagnostics.
    sizeHighWaterMark = Math.max(sizeHighWaterMark, shortTermStorage.size());
}
/**
 * Demonstrates that a lag window smaller than the input's time jitter lets
 * out-of-order points slip through the sorter: the verifying consumer fails
 * on the final late point.
 */
@Test
public void testEmittedPointsAreNotProperlySorted() {
    //with this lag things do not get sorted correctly
    Duration maxLag = Duration.ofSeconds(2L);
    TimeOrderVerifyingConsumer consumer = new TimeOrderVerifyingConsumer();
    ApproximateTimeSorter<TimePojo> sorter = new ApproximateTimeSorter<>(maxLag, consumer);

    // Nothing is emitted while points stay inside the lag window.
    sorter.accept(new TimePojo(EPOCH));
    assertEquals(0, consumer.size());
    sorter.accept(new TimePojo(EPOCH.plusSeconds(1)));
    assertEquals(0, consumer.size());

    // A jump far past the window flushes the buffered points in order.
    sorter.accept(new TimePojo(EPOCH.plusSeconds(10)));
    assertEquals(2, consumer.size());
    assertEquals(consumer.timeFor(0), EPOCH);
    assertEquals(consumer.timeFor(1), EPOCH.plusSeconds(1));

    // This late point is already outside the window and is emitted immediately.
    sorter.accept(new TimePojo(EPOCH.plusSeconds(5)));
    assertEquals(3, consumer.size());
    assertEquals(consumer.timeFor(2), EPOCH.plusSeconds(5));

    //this TestConsumer fails here because this point is passed through the sorter (after the 5 point was)
    assertThrows(
        AssertionError.class,
        () -> sorter.accept(new TimePojo(EPOCH.plusSeconds(4)))
    );
}
/**
 * Completes the client span for a received RPC response by delegating to the
 * shared {@code handleFinish} path.
 */
public void handleReceive(RpcClientResponse response, Span span) {
    handleFinish(response, span);
}
/**
 * Even when the unwrapped response is null, a response carrying an error
 * must have that error recorded on the span and the span finished.
 */
@Test
void handleReceive_finishesSpanEvenIfUnwrappedNull_withError() {
    brave.Span span = mock(brave.Span.class);
    when(span.context()).thenReturn(context);
    when(span.customizer()).thenReturn(span);

    Exception failure = new RuntimeException("peanuts");
    when(response.error()).thenReturn(failure);

    handler.handleReceive(response, span);

    // The error must be attached and the span finished; nothing else touched.
    verify(span).isNoop();
    verify(span).context();
    verify(span).customizer();
    verify(span).error(failure);
    verify(span).finish();
    verifyNoMoreInteractions(span);
}
/**
 * Merges a file footer into the accumulated global metadata, delegating to
 * the three-argument overload with {@code true} as the flag.
 * NOTE(review): the flag presumably enables strict schema merging — confirm
 * against the three-argument overload's contract.
 */
static GlobalMetaData mergeInto(FileMetaData toMerge, GlobalMetaData mergedMetadata) {
    return mergeInto(toMerge, mergedMetadata, true);
}
/**
 * Merging two footers whose key/value metadata have disjoint keys must keep
 * both entries in the merged result.
 */
@Test
public void testMergeMetadataWithNoConflictingKeyValues() {
    // Plain maps instead of double-brace initialization: the anonymous-subclass
    // idiom allocates an extra class per map and captures a hidden reference to
    // the enclosing test instance.
    Map<String, String> keyValues1 = new HashMap<>();
    keyValues1.put("a", "b");
    Map<String, String> keyValues2 = new HashMap<>();
    keyValues2.put("c", "d");

    FileMetaData md1 = new FileMetaData(
        new MessageType(
            "root1",
            new PrimitiveType(REPEATED, BINARY, "a"),
            new PrimitiveType(OPTIONAL, BINARY, "b")),
        keyValues1,
        "test");
    FileMetaData md2 = new FileMetaData(
        new MessageType(
            "root1",
            new PrimitiveType(REPEATED, BINARY, "a"),
            new PrimitiveType(OPTIONAL, BINARY, "b")),
        keyValues2,
        "test");

    GlobalMetaData merged = ParquetFileWriter.mergeInto(md2, ParquetFileWriter.mergeInto(md1, null));
    Map<String, String> mergedValues =
        merged.merge(new StrictKeyValueMetadataMergeStrategy()).getKeyValueMetaData();
    // Both non-conflicting entries survive the merge.
    assertEquals("b", mergedValues.get("a"));
    assertEquals("d", mergedValues.get("c"));
}
/**
 * Creates a root uncommitted bundle that is not associated with any
 * {@code PCollection} (null) and uses the empty structural key.
 */
@Override
public <T> UncommittedBundle<T> createRootBundle() {
    return UncommittedImmutableListBundle.create(null, StructuralKey.empty());
}
/**
 * Once a bundle has been committed, adding another element must throw an
 * {@code IllegalStateException} naming the rejected value.
 */
@Test
public void addAfterCommitShouldThrowException() {
    UncommittedBundle<Integer> bundle = bundleFactory.createRootBundle();
    bundle.add(WindowedValue.valueInGlobalWindow(1));
    CommittedBundle<Integer> committed = bundle.commit(Instant.now());
    assertThat(committed.getElements(), containsInAnyOrder(WindowedValue.valueInGlobalWindow(1)));

    // Adding to an already-committed bundle must fail.
    thrown.expect(IllegalStateException.class);
    thrown.expectMessage("3");
    thrown.expectMessage("committed");
    bundle.add(WindowedValue.valueInGlobalWindow(3));
}
/**
 * Reports whether the ShenYu configuration is absent in Apollo: true only
 * when none of the four known data ids exists.
 */
@Override
protected boolean notExist() {
    String[] dataIds = {
            ApolloPathConstants.PLUGIN_DATA_ID,
            ApolloPathConstants.AUTH_DATA_ID,
            ApolloPathConstants.META_DATA_ID,
            ApolloPathConstants.PROXY_SELECTOR_DATA_ID,
    };
    for (String dataId : dataIds) {
        // Any existing data id means configuration is present.
        if (!dataIdNotExist(NodeDataPathUtils.appendListStuff(dataId))) {
            return false;
        }
    }
    return true;
}
/**
 * When every data id resolves to a non-null value, {@code notExist()} must
 * report that configuration is present (i.e. return false).
 */
@Test
public void testAllExist() {
    String[] dataIds = {PLUGIN_DATA_ID, AUTH_DATA_ID, META_DATA_ID, PROXY_SELECTOR_DATA_ID};
    for (String dataId : dataIds) {
        when(apolloClient.getItemValue(join(dataId))).thenReturn(META_DATA_ID);
    }
    Assertions.assertFalse(apolloDataChangedInit.notExist(), "some key not exist.");
}
/**
 * Static factory for a {@code RetryTransformer} governed by the given retry.
 *
 * @param retry the Retry instance providing the retry policy
 * @param <T>   the upstream element type
 * @return a transformer that applies {@code retry} to the composed source
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
/**
 * A result matching the retry-on-result predicate must trigger a retry; the
 * Flowable emits only the eventual successful value and the metrics record
 * one successful call with retry attempts.
 */
@Test
public void retryOnResultUsingFlowable() throws InterruptedException {
    RetryConfig retryConfig = RetryConfig.<String>custom()
        .retryOnResult("retry"::equals)
        .waitDuration(Duration.ofMillis(50))
        .maxAttempts(3)
        .build();
    Retry retry = Retry.of("testName", retryConfig);
    given(helloWorldService.returnHelloWorld())
        .willReturn("retry")
        .willReturn("success");

    Flowable.fromCallable(helloWorldService::returnHelloWorld)
        .compose(RetryTransformer.of(retry))
        .test()
        .await()
        .assertValueCount(1)
        .assertValue("success")
        .assertComplete()
        .assertSubscribed();

    // Two invocations: the retried result plus the successful one.
    then(helloWorldService).should(times(2)).returnHelloWorld();
    Retry.Metrics retryMetrics = retry.getMetrics();
    assertThat(retryMetrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
    assertThat(retryMetrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1);
}
/**
 * Asserts that the iterable under test contains {@code element}.
 *
 * <p>When the element is missing but some actual element has the same
 * {@code toString()} representation, the failure message additionally calls
 * out the type of the expected element and the matching actual elements,
 * since a type mismatch is the most likely source of confusion.
 */
public final void contains(@Nullable Object element) {
    if (!Iterables.contains(checkNotNull(actual), element)) {
        List<@Nullable Object> elementList = newArrayList(element);
        if (hasMatchingToStringPair(actual, elementList)) {
            // Same string form, different type/instance: build the richer message.
            failWithoutActual(
                fact("expected to contain", element),
                fact("an instance of", objectToTypeName(element)),
                simpleFact("but did not"),
                fact(
                    "though it did contain",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual, /* itemsToCheck= */ elementList))),
                fullContents());
        } else {
            failWithActual("expected to contain", element);
        }
    }
}
/**
 * When the expected element is null but an actual element stringifies to
 * "null", the failure message must identify the expected element's type.
 */
@Test
public void iterableContainsFailsWithSameToStringAndNull() {
    expectFailureWhenTestingThat(asList(1, "null")).contains(null);
    assertFailureValue("an instance of", "null type");
}
// Intentionally a no-op: this implementation discards the callback, so it is
// never invoked on track events. NOTE(review): presumably this is the
// disabled/empty API variant — confirm against the enabled implementation.
@Override
public void setTrackEventCallBack(SensorsDataTrackEventCallBack trackEventCallBack) {
}
/**
 * The no-op implementation discards the registered callback, so tracking an
 * event must never invoke it (the callback fails the test if called).
 */
@Test
public void setTrackEventCallBack() {
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            // Reaching this point means the callback was wired up, which the
            // no-op implementation must not do.
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.track("TestTrackEvent", new JSONObject());
}
/**
 * Accepts a pickle when no line filter is registered for its file, or when
 * any configured line matches the pickle itself, its scenario, or (when
 * present) its examples, rule, or feature location.
 */
@Override
public boolean test(Pickle pickle) {
    URI picklePath = pickle.getUri();
    if (!lineFilters.containsKey(picklePath)) {
        // No filter for this file: accept everything in it.
        return true;
    }
    return lineFilters.get(picklePath).stream().anyMatch(line ->
        Objects.equals(line, pickle.getLocation().getLine())
            || Objects.equals(line, pickle.getScenarioLocation().getLine())
            || pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false)
            || pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false)
            || pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false));
}
/**
 * A filter pinned to line 12 must match only the third pickle.
 * NOTE(review): presumably line 12 is the third example row in the feature
 * fixture — confirm against the fixture file.
 */
@Test
void matches_third_example() {
    LinePredicate predicate = new LinePredicate(singletonMap(
        featurePath,
        singletonList(12)));

    assertFalse(predicate.test(firstPickle));
    assertFalse(predicate.test(secondPickle));
    assertTrue(predicate.test(thirdPickle));
    assertFalse(predicate.test(fourthPickle));
}
/**
 * Resolves the type of {@code field} across the given streams.
 *
 * <p>Fetches all known field-to-type mappings for the streams, then reduces
 * the set of types seen for this field to a single result; the result is
 * empty when the field is unknown or its type is ambiguous across index sets
 * (see the multiple-types test below).
 */
@Override
public Optional<String> getType(Set<String> streamIds, String field) {
    return typeFromFieldType(this.get(streamIds).get(field));
}
/**
 * Two index sets map the same field to different physical types ("long" vs
 * "float"); the lookup must refuse to pick one and return empty.
 */
@Test
void returnsEmptyOptionalIfMultipleTypesExistForField() {
    final Pair<IndexFieldTypesService, StreamService> services = mockServices(
        IndexFieldTypesDTO.create("indexSet1", "stream1", ImmutableSet.of(
            FieldTypeDTO.create("somefield", "long")
        )),
        IndexFieldTypesDTO.create("indexSet2", "stream1", ImmutableSet.of(
            FieldTypeDTO.create("somefield", "float")
        ))
    );
    final FieldTypesLookup lookup = new FieldTypesLookup(services.getLeft(), services.getRight());

    final Optional<String> result = lookup.getType(Collections.singleton("stream1"), "somefield");

    assertThat(result).isEmpty();
}
/**
 * Creates a {@code Read} transform that decodes each Pub/Sub message payload
 * as a UTF-8 string, using the UTF-8 string coder for downstream elements.
 */
public static Read<String> readStrings() {
    return Read.newBuilder(
            (PubsubMessage message) -> new String(message.getPayload(), StandardCharsets.UTF_8))
        .setCoder(StringUtf8Coder.of())
        .build();
}
/**
 * A topic supplied as a runtime {@code ValueProvider} must be recorded on
 * the read transform but remain inaccessible at pipeline-construction time.
 */
@Test
public void testRuntimeValueProviderTopic() {
    TestPipeline pipeline = TestPipeline.create();
    ValueProvider<String> topicProvider = pipeline.newProvider("projects/project/topics/topic");
    Read<String> read = PubsubIO.readStrings().fromTopic(topicProvider);
    pipeline.apply(read);

    assertThat(read.getTopicProvider(), not(nullValue()));
    assertThat(read.getTopicProvider().isAccessible(), is(false));
}
/**
 * Creates a namespace.
 *
 * <p>When {@code customNamespaceId} is blank a random UUID is generated;
 * otherwise the id is trimmed and validated (character pattern, maximum
 * length, uniqueness) before creation. The namespace name must match
 * {@code namespaceNameCheckPattern}.
 *
 * @param namespaceId   caller-supplied namespace id; may be blank
 * @param namespaceName display name of the namespace
 * @param namespaceDesc optional description
 * @return {@code true} on success; {@code false} on any validation failure
 *         or creation error
 */
@PostMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE)
public Boolean createNamespace(@RequestParam("customNamespaceId") String namespaceId,
        @RequestParam("namespaceName") String namespaceName,
        @RequestParam(value = "namespaceDesc", required = false) String namespaceDesc) {
    if (StringUtils.isBlank(namespaceId)) {
        // No custom id supplied: generate one.
        namespaceId = UUID.randomUUID().toString();
    } else {
        namespaceId = namespaceId.trim();
        if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) {
            return false;
        }
        if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) {
            return false;
        }
        // check unique
        if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) {
            return false;
        }
    }
    // contains illegal chars
    if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) {
        return false;
    }
    try {
        return namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc);
    } catch (NacosException e) {
        // NOTE(review): the exception is swallowed and surfaced only as
        // "false"; consider logging the cause so failures are diagnosable.
        return false;
    }
}
/**
 * Creating a namespace whose id already exists must be rejected: the
 * uniqueness check (tenant count > 0) short-circuits to {@code false}.
 */
@Test
void testCreateNamespaceWithNonUniqueId() throws Exception {
    when(namespacePersistService.tenantInfoCountByTenantId("test-Id")).thenReturn(1);
    assertFalse(namespaceController.createNamespace("test-Id", "testNam2", "testDesc"));
}
/**
 * Downloads a daemon log file from the given host on behalf of {@code user},
 * delegating to the download helper with the boolean flag set to
 * {@code true}. NOTE(review): the flag presumably marks this as a daemon
 * (vs worker) log download — confirm against the helper's signature.
 *
 * @throws IOException if reading the log file fails
 */
public Response downloadDaemonLogFile(String host, String fileName, String user) throws IOException {
    return logFileDownloadHelper.downloadFile(host, fileName, user, true);
}
/**
 * Downloading an existing daemon log must return HTTP 200 with a non-null
 * entity and a Content-Disposition header naming the file "host-nimbus.log".
 */
@Test
public void testDownloadDaemonLogFile() throws IOException {
    try (TmpPath rootPath = new TmpPath()) {
        LogviewerLogDownloadHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
        Response response = handler.downloadDaemonLogFile("host", "nimbus.log", "user");
        // Clean up the temp tree before asserting; the response is already built.
        Utils.forceDelete(rootPath.toString());
        assertThat(response.getStatus(), is(Response.Status.OK.getStatusCode()));
        assertThat(response.getEntity(), not(nullValue()));
        String contentDisposition = response.getHeaderString(HttpHeaders.CONTENT_DISPOSITION);
        assertThat(contentDisposition, containsString("host-nimbus.log"));
    }
}