focal_method: string, length 13 to 60.9k characters
test_case: string, length 25 to 109k characters
/**
 * Returns the cube root of an INT value.
 * Null-safe: a null input yields a null result via the Double overload.
 */
@Udf(description = "Returns the cube root of an INT value")
public Double cbrt(
    @UdfParameter(
        value = "value",
        description = "The value to get the cube root of."
    ) final Integer value
) {
    // Widen to Double (or propagate null) and delegate to the Double implementation.
    if (value == null) {
        return cbrt((Double) null);
    }
    return cbrt(value.doubleValue());
}
/** Cube root of zero must be (approximately) zero for int, long, and double inputs. */
@Test
public void shouldHandleZero() {
    final double tolerance = 0.000000000000001;
    assertThat(udf.cbrt(0), closeTo(0.0, tolerance));
    assertThat(udf.cbrt(0L), closeTo(0.0, tolerance));
    assertThat(udf.cbrt(0.0), closeTo(0.0, tolerance));
}
/**
 * Builds disk-allocation settings; watermark thresholds are only parsed when
 * the feature is enabled.
 */
public static ClusterAllocationDiskSettings create(boolean enabled, String low, String high, String floodStage) {
    if (enabled) {
        return ClusterAllocationDiskSettings.create(enabled, createWatermarkSettings(low, high, floodStage));
    }
    // Disabled: watermark values are ignored entirely.
    return ClusterAllocationDiskSettings.create(enabled, null);
}
// Mixing an absolute watermark ("10Gb") with a percentage one ("10%") must be rejected.
@Test(expected = Exception.class)
public void throwExceptionWhenMixedSettings() throws Exception {
    ClusterAllocationDiskSettingsFactory.create(true, "10Gb", "10%", "");
}
/**
 * Registers a duplication for the given file component.
 * Validation order matters: the file argument is checked first, then the duplication.
 */
@Override
public void add(Component file, Duplication duplication) {
    checkFileComponentArgument(file);
    checkNotNull(duplication, "duplication can not be null");
    // Keyed by component key; a multimap, so several duplications per file are allowed.
    duplications.put(file.getKey(), duplication);
}
// A null file must fail fast with the exact message produced by checkFileComponentArgument.
@Test
public void add_throws_NPE_if_file_argument_is_null() {
    assertThatThrownBy(() -> underTest.add(null, SOME_DUPLICATION))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("file can not be null");
}
/**
 * Resolves the KSQL parameter type for a varargs Java type, using the
 * varargs-specific Java-to-arg-type mapping table.
 */
public static ParamType getVarArgsSchemaFromType(final Type type) {
    return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
// Primitive long in a variadic position must map to the LONG param type.
@Test
public void shouldGetLongSchemaForLongPrimitiveClassVariadic() {
    assertThat(
        UdfUtil.getVarArgsSchemaFromType(long.class),
        equalTo(ParamTypes.LONG)
    );
}
/**
 * Adds Kafka-specific tags (partition, partition key, message key, offset) to the
 * span before the exchange is processed. Only non-null values are tagged.
 */
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
    // Apply the generic (non-Kafka) decoration first.
    super.pre(span, exchange, endpoint);
    // NOTE(review): PARTITION and OFFSET are read via getValue(...) while
    // PARTITION_KEY and KEY use exchange.getIn().getHeader(...) directly —
    // presumably getValue adds extra coercion/fallback; confirm the asymmetry is intentional.
    String partition = getValue(exchange, PARTITION, Integer.class);
    if (partition != null) {
        span.setTag(KAFKA_PARTITION_TAG, partition);
    }
    String partitionKey = exchange.getIn().getHeader(PARTITION_KEY, String.class);
    if (partitionKey != null) {
        span.setTag(KAFKA_PARTITION_KEY_TAG, partitionKey);
    }
    String key = exchange.getIn().getHeader(KEY, String.class);
    if (key != null) {
        span.setTag(KAFKA_KEY_TAG, key);
    }
    String offset = getValue(exchange, OFFSET, String.class);
    if (offset != null) {
        span.setTag(KAFKA_OFFSET_TAG, offset);
    }
}
// Verifies that numeric headers (Long offset, Integer partition) end up as their
// string representations in the span tags after pre() runs.
@Test
public void testPrePartitionAsIntegerHeaderAndOffsetAsLongHeader() {
    Long testOffset = 4875454L;
    Integer testPartition = 0;
    Endpoint endpoint = Mockito.mock(Endpoint.class);
    Exchange exchange = Mockito.mock(Exchange.class);
    Message message = Mockito.mock(Message.class);
    Mockito.when(endpoint.getEndpointUri()).thenReturn("test");
    Mockito.when(exchange.getIn()).thenReturn(message);
    // Headers are stubbed as String lookups because the decorator reads them as String.
    Mockito.when(message.getHeader(KafkaSpanDecorator.OFFSET, String.class)).thenReturn(testOffset.toString());
    Mockito.when(message.getHeader(KafkaSpanDecorator.PARTITION, String.class)).thenReturn(testPartition.toString());
    SpanDecorator decorator = new KafkaSpanDecorator();
    MockSpanAdapter span = new MockSpanAdapter();
    decorator.pre(span, exchange, endpoint);
    assertEquals(String.valueOf(testOffset), span.tags().get(KafkaSpanDecorator.KAFKA_OFFSET_TAG));
    assertEquals(String.valueOf(testPartition), span.tags().get(KafkaSpanDecorator.KAFKA_PARTITION_TAG));
}
/**
 * Converts the given string to lower underscore form (e.g. "ToLowerCamel" -> "to_lower_camel").
 * NOTE(review): despite the name, this delegates to toUnderline and produces
 * snake_case, as confirmed by its unit test — consider renaming or documenting upstream.
 */
public static String toLowerCamel(String src) {
    return toUnderline(src, false);
}
// Documents the actual (snake_case) behavior of toLowerCamel.
@Test
public void testToLowerCamel() {
    String result = FieldUtils.toLowerCamel("ToLowerCamel");
    Assert.assertEquals("to_lower_camel", result);
}
/**
 * Parses a dotted Dewey number string (e.g. "1.2.3") into a DeweyNumber.
 *
 * @throws IllegalArgumentException if the string splits into zero components (e.g. ".")
 * @throws NumberFormatException if any component is not a valid int
 */
public static DeweyNumber fromString(final String deweyNumberString) {
    String[] splits = deweyNumberString.split("\\.");
    if (splits.length == 1) {
        // Single component: parse the whole input.
        // NOTE(review): inputs like "5." also split to one component but this parses
        // the original string ("5."), yielding NumberFormatException — confirm intended.
        return new DeweyNumber(Integer.parseInt(deweyNumberString));
    } else if (splits.length > 0) {
        int[] deweyNumber = new int[splits.length];
        for (int i = 0; i < splits.length; i++) {
            deweyNumber[i] = Integer.parseInt(splits[i]);
        }
        return new DeweyNumber(deweyNumber);
    } else {
        // split("\\.") drops trailing empty strings, so "." yields a zero-length array.
        throw new IllegalArgumentException(
            "Failed to parse " + deweyNumberString + " as a Dewey number");
    }
}
// "." splits into zero components and must be rejected.
@Test(expected = IllegalArgumentException.class)
public void testZeroSplitsDeweyNumber() {
    DeweyNumber.fromString(".");
}
/**
 * Authenticates the request from its client certificate and, on success, attaches
 * the resulting Athenz principal to the request. Returns an UNAUTHORIZED error
 * response when no certificate identity can be derived or any error occurs.
 */
@Override
public Optional<ErrorResponse> filter(DiscFilterRequest request) {
    try {
        Optional<AthenzPrincipal> certificatePrincipal = getClientCertificate(request)
            .map(AthenzIdentities::from)
            .map(AthenzPrincipal::new);
        if (certificatePrincipal.isEmpty()) {
            String errorMessage = "Unable to authenticate Athenz identity. " +
                "Either client certificate or principal token is required.";
            return createResponse(request, Response.Status.UNAUTHORIZED, errorMessage);
        }
        AthenzPrincipal principal = certificatePrincipal.get();
        // Expose the identity through all three request-side channels.
        request.setUserPrincipal(principal);
        request.setRemoteUser(principal.getName());
        request.setAttribute(RESULT_PRINCIPAL, principal);
        return Optional.empty();
    } catch (Exception e) {
        // Any parsing/identity failure maps to 401 rather than propagating.
        return createResponse(request, Response.Status.UNAUTHORIZED, e.getMessage());
    }
}
// In passthrough mode the filter must not write any response, even when unauthenticated.
@Test
void no_response_produced_when_passthrough_mode_is_enabled() {
    DiscFilterRequest request = FilterTestUtils.newRequestBuilder().build();
    ResponseHandlerMock responseHandler = new ResponseHandlerMock();
    AthenzPrincipalFilter filter = createFilter(true);
    filter.filter(request, responseHandler);
    assertNull(responseHandler.response);
}
/**
 * Parses the file-distribution status JSON and renders a human-readable summary.
 * Unknown status values and malformed JSON are surfaced as RuntimeExceptions.
 */
String parseAndGenerateOutput(String json) {
    JsonNode jsonNode;
    try {
        jsonNode = Jackson.mapper().readTree(json);
    } catch (IOException e) {
        // Malformed JSON from the server is a protocol error; surface unchecked.
        throw new RuntimeException(e);
    }
    String status = jsonNode.get("status").asText();
    return switch (status) {
        case statusUnknown -> "File distribution status unknown: " + jsonNode.get("message").asText();
        case statusInProgress -> "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts"));
        case statusFinished -> "File distribution finished";
        default -> throw new RuntimeException("Unknown status " + status);
    };
}
/** A FINISHED status renders the plain "finished" message with no host details. */
@Test
public void finishedForAllHosts() {
    final String json = "{\"status\":\"FINISHED\"}";
    assertEquals("File distribution finished", client.parseAndGenerateOutput(json));
}
/**
 * Registers this collector with the default Prometheus registry.
 *
 * @return this collector, for chaining
 */
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
// Blacklist must win over whitelist: cassandra.concurrent is whitelisted AND
// blacklisted, so its metrics must be absent while java.lang metrics remain.
@Test
public void testBlacklist() throws Exception {
    JmxCollector jc =
        new JmxCollector(
                "\n---\nwhitelistObjectNames:\n- java.lang:*\n- org.apache.cassandra.concurrent:*\nblacklistObjectNames:\n- org.apache.cassandra.concurrent:*"
                    .replace('`', '"'))
            .register(prometheusRegistry);
    // Test what should and shouldn't be present.
    // Whitelisted and not blacklisted: present.
    assertNotNull(
        getSampleValue(
            "java_lang_OperatingSystem_ProcessCpuTime", new String[] {}, new String[] {}));
    // Whitelisted but also blacklisted: absent.
    assertNull(
        getSampleValue(
            "org_apache_cassandra_concurrent_CONSISTENCY_MANAGER_ActiveCount",
            new String[] {},
            new String[] {}));
    // Never whitelisted: absent.
    assertNull(
        getSampleValue(
            "org_apache_cassandra_metrics_Compaction_Value",
            new String[] {"name"},
            new String[] {"CompletedTasks"}));
    assertNull(
        getSampleValue(
            "hadoop_DataNode_replaceBlockOpMinTime",
            new String[] {"name"},
            new String[] {"DataNodeActivity-ams-hdd001-50010"}));
}
/**
 * Creates a new, empty builder for this type.
 */
public static Builder builder() {
    return new Builder();
}
// Two identically-configured instances must be equal AND hash to the same bucket,
// so a HashSet collapses them to a single entry.
@Test
public void testEqualsAndHashCode() {
    PluginData pluginData1 = PluginData.builder().id("id").name("name").config("config")
        .role("role").enabled(true).sort(0).build();
    PluginData pluginData2 = PluginData.builder().id("id").name("name").config("config")
        .role("role").enabled(true).sort(0).build();
    Set<PluginData> set = new HashSet<>();
    set.add(pluginData1);
    set.add(pluginData2);
    assertThat(set, hasSize(1));
}
/**
 * Returns this instance's name.
 */
@Override
public String getName() {
    return _name;
}
// concat(col, col, '-') must resolve to the scalar-wrapper transform named "concat"
// and join each row's value with itself using '-'.
@Test
public void testStringConcatTransformFunction() {
    ExpressionContext expression = RequestContextUtils.getExpression(
        String.format("concat(%s, %s, '-')", STRING_ALPHANUM_SV_COLUMN, STRING_ALPHANUM_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "concat");
    String[] expectedValues = new String[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = _stringAlphaNumericSVValues[i] + "-" + _stringAlphaNumericSVValues[i];
    }
    testTransformFunction(transformFunction, expectedValues);
}
/**
 * Returns a live RSet view over the values mapped to {@code key}. Mutating
 * operations on the returned set are routed back through the multimap so the
 * key index stays consistent; TTL/rename operations are unsupported on the view.
 */
@Override
public RSet<V> get(final K key) {
    String keyHash = keyHash(key);
    final String setName = getValuesName(keyHash);
    return new RedissonSet<V>(codec, commandExecutor, setName, null) {

        // Adds go through the multimap so the key is registered in the index hash.
        @Override
        public RFuture<Boolean> addAsync(V value) {
            return RedissonSetMultimap.this.putAsync(key, value);
        }

        @Override
        public RFuture<Boolean> addAllAsync(Collection<? extends V> c) {
            return RedissonSetMultimap.this.putAllAsync(key, c);
        }

        @Override
        public RFuture<Boolean> removeAsync(Object value) {
            return RedissonSetMultimap.this.removeAsync(key, value);
        }

        @Override
        public RFuture<Boolean> removeAllAsync(Collection<?> c) {
            if (c.isEmpty()) {
                return new CompletableFutureWrapper<>(false);
            }
            // ARGV[1] is the encoded map key; the rest are the encoded values to remove.
            List<Object> args = new ArrayList<Object>(c.size() + 1);
            args.add(encodeMapKey(key));
            encode(args, c);

            // Lua: SREM in chunks of 5000 (unpack has an argument limit), then drop the
            // key from the index hash (KEYS[1]) if the value set became empty.
            return commandExecutor.evalWriteAsync(RedissonSetMultimap.this.getRawName(), codec, RedisCommands.EVAL_BOOLEAN_AMOUNT,
                    "local count = 0;" +
                    "for i=2, #ARGV, 5000 do " +
                        "count = count + redis.call('srem', KEYS[2], unpack(ARGV, i, math.min(i+4999, table.getn(ARGV)))) " +
                    "end; " +
                    "if count > 0 then " +
                        "if redis.call('scard', KEYS[2]) == 0 then " +
                            "redis.call('hdel', KEYS[1], ARGV[1]); " +
                        "end; " +
                        "return 1;" +
                    "end;" +
                    "return 0; ",
                    Arrays.<Object>asList(RedissonSetMultimap.this.getRawName(), setName),
                    args.toArray());
        }

        // Deleting the view removes the whole entry (index + value set).
        @Override
        public RFuture<Boolean> deleteAsync() {
            ByteBuf keyState = encodeMapKey(key);
            return RedissonSetMultimap.this.fastRemoveAsync(Arrays.asList(keyState),
                    Arrays.asList(RedissonSetMultimap.this.getRawName(), setName),
                    RedisCommands.EVAL_BOOLEAN_AMOUNT);
        }

        // Expiry/rename are deliberately unsupported: the value set's lifecycle is
        // owned by the multimap, not by the view.
        @Override
        public RFuture<Boolean> clearExpireAsync() {
            throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
        }

        @Override
        public RFuture<Boolean> expireAsync(long timeToLive, TimeUnit timeUnit, String param, String... keys) {
            throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
        }

        @Override
        protected RFuture<Boolean> expireAtAsync(long timestamp, String param, String... keys) {
            throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
        }

        @Override
        public RFuture<Long> remainTimeToLiveAsync() {
            throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
        }

        @Override
        public RFuture<Void> renameAsync(String newName) {
            throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
        }

        @Override
        public RFuture<Boolean> renamenxAsync(String newName) {
            throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
        }
    };
}
// putAll returns true only when the set actually changed; also exercises the
// chunked (5000-per-call) path with 10000 values.
@Test
public void testPutAll() {
    RSetMultimap<String, String> map = redisson.getSetMultimap("test1");
    List<String> values = Arrays.asList("1", "2", "3");
    assertThat(map.putAll("0", values)).isTrue();
    // Re-adding an existing value is a no-op and must report false.
    assertThat(map.putAll("0", Arrays.asList("1"))).isFalse();
    assertThat(map.get("0")).containsOnlyElementsOf(values);
    List<String> vals = new ArrayList<>();
    for (int i = 0; i < 10000; i++) {
        vals.add("" + i);
    }
    map.putAll("0", vals);
}
/**
 * Authenticates a user by name or e-mail plus password.
 * Returns the user on success; empty for null credentials, unknown/disabled
 * users, or a password mismatch. Post-login activities run only on success.
 */
public Optional<User> login(String nameOrEmail, String password) {
    // Both credentials are mandatory.
    if (nameOrEmail == null || password == null) {
        return Optional.empty();
    }
    // Look up by name first, then fall back to e-mail.
    User candidate = userDAO.findByName(nameOrEmail);
    if (candidate == null) {
        candidate = userDAO.findByEmail(nameOrEmail);
    }
    if (candidate == null || candidate.isDisabled()) {
        return Optional.empty();
    }
    boolean passwordMatches =
        encryptionService.authenticate(password, candidate.getPassword(), candidate.getSalt());
    if (!passwordMatches) {
        return Optional.empty();
    }
    performPostLoginActivities(candidate);
    return Optional.of(candidate);
}
// An unknown API key must yield an empty Optional, not an exception.
@Test
void apiLoginShouldNotReturnUserIfUserNotFoundFromLookupByApikey() {
    Mockito.when(userDAO.findByApiKey("apikey")).thenReturn(null);
    Optional<User> user = userService.login("apikey");
    Assertions.assertFalse(user.isPresent());
}
/**
 * Reports whether the connected Ethereum node is considered synced: it must not
 * be actively syncing AND its latest block must be newer than
 * (now - syncThreshold).
 *
 * @throws Exception if either JSON-RPC call fails
 */
boolean isSynced() throws Exception {
    EthSyncing ethSyncing = web3j.ethSyncing().send();
    if (ethSyncing.isSyncing()) {
        return false;
    } else {
        EthBlock ethBlock = web3j.ethGetBlockByNumber(DefaultBlockParameterName.LATEST, false).send();
        // Block timestamps are in seconds; convert to millis to compare with wall clock.
        long timestamp = ethBlock.getBlock().getTimestamp().longValue() * 1000;
        // Synced iff the head block is no older than syncThreshold millis.
        return System.currentTimeMillis() - syncThreshold < timestamp;
    }
}
// Not syncing + latest block stamped "now" => synced.
@Test
public void testIsSyncedFullySynced() throws Exception {
    configureSyncing(false);
    configureLatestBlock(System.currentTimeMillis() / 1000); // block timestamp is in seconds
    assertTrue(ensResolver.isSynced());
}
/**
 * Creates a serde for the given format/schema pair, wrapping any failure in a
 * SchemaNotSupportedException that names the target, format, and schema.
 */
Serde<List<?>> createFormatSerde(
    final String target,
    final FormatInfo formatInfo,
    final PersistenceSchema schema,
    final KsqlConfig ksqlConfig,
    final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
    final boolean isKey
) {
    final Format format = formatFactory.apply(formatInfo);
    try {
        return format
            .getSerde(schema, formatInfo.getProperties(), ksqlConfig, schemaRegistryClientFactory, isKey);
    } catch (final Exception e) {
        // Preserve the cause and give the user enough context to diagnose the schema mismatch.
        throw new SchemaNotSupportedException(target + " format does not support schema."
            + System.lineSeparator()
            + "format: " + format.name()
            + System.lineSeparator()
            + "schema: " + schema
            + System.lineSeparator()
            + "reason: " + e.getMessage(),
            e
        );
    }
}
// createFormatSerde must resolve the Format via the injected factory, passing the FormatInfo through.
@Test
public void shouldInvokeFormatFactoryWithCorrectParams() {
    // When:
    serdeFactory.createFormatSerde("target", formatInfo, schema, config, srClientFactory, false);

    // Then:
    verify(formatFactory).apply(formatInfo);
}
/**
 * Creates or updates a topic, either on a single broker (-b) or on every master
 * of a cluster (-c). Queue counts default to 8; ordered topics additionally get
 * an order configuration written. Falls through to printing help when neither
 * -b nor -c is given.
 *
 * @throws SubCommandException wrapping any failure during execution
 */
@Override
public void execute(final CommandLine commandLine, final Options options, RPCHook rpcHook) throws SubCommandException {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    // Unique instance name avoids client-id clashes with other admin tools on this host.
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));

    try {
        TopicConfig topicConfig = new TopicConfig();
        topicConfig.setReadQueueNums(8);
        topicConfig.setWriteQueueNums(8);
        topicConfig.setTopicName(commandLine.getOptionValue('t').trim());

        // Optional attribute modifications, e.g. "+key=value,-key".
        if (commandLine.hasOption('a')) {
            String attributesModification = commandLine.getOptionValue('a').trim();
            Map<String, String> attributes = AttributeParser.parseToMap(attributesModification);
            topicConfig.setAttributes(attributes);
        }

        // readQueueNums
        if (commandLine.hasOption('r')) {
            topicConfig.setReadQueueNums(Integer.parseInt(commandLine.getOptionValue('r').trim()));
        }

        // writeQueueNums
        if (commandLine.hasOption('w')) {
            topicConfig.setWriteQueueNums(Integer.parseInt(commandLine.getOptionValue('w').trim()));
        }

        // perm
        if (commandLine.hasOption('p')) {
            topicConfig.setPerm(Integer.parseInt(commandLine.getOptionValue('p').trim()));
        }

        // Unit / center-sync flags are folded into a single sys-flag bitmask.
        boolean isUnit = false;
        if (commandLine.hasOption('u')) {
            isUnit = Boolean.parseBoolean(commandLine.getOptionValue('u').trim());
        }

        boolean isCenterSync = false;
        if (commandLine.hasOption('s')) {
            isCenterSync = Boolean.parseBoolean(commandLine.getOptionValue('s').trim());
        }

        int topicCenterSync = TopicSysFlag.buildSysFlag(isUnit, isCenterSync);
        topicConfig.setTopicSysFlag(topicCenterSync);

        boolean isOrder = false;
        if (commandLine.hasOption('o')) {
            isOrder = Boolean.parseBoolean(commandLine.getOptionValue('o').trim());
        }
        topicConfig.setOrder(isOrder);

        if (commandLine.hasOption('b')) {
            // Single-broker mode: update the one broker, plus its order conf if ordered.
            String addr = commandLine.getOptionValue('b').trim();

            defaultMQAdminExt.start();
            defaultMQAdminExt.createAndUpdateTopicConfig(addr, topicConfig);

            if (isOrder) {
                String brokerName = CommandUtil.fetchBrokerNameByAddr(defaultMQAdminExt, addr);
                String orderConf = brokerName + ":" + topicConfig.getWriteQueueNums();
                defaultMQAdminExt.createOrUpdateOrderConf(topicConfig.getTopicName(), orderConf, false);
                System.out.printf("%s%n", String.format("set broker orderConf. isOrder=%s, orderConf=[%s]", isOrder, orderConf.toString()));
            }
            System.out.printf("create topic to %s success.%n", addr);
            System.out.printf("%s%n", topicConfig);
            return;

        } else if (commandLine.hasOption('c')) {
            // Cluster mode: fan out the same config to every master broker.
            String clusterName = commandLine.getOptionValue('c').trim();

            defaultMQAdminExt.start();

            Set<String> masterSet = CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName);
            for (String addr : masterSet) {
                defaultMQAdminExt.createAndUpdateTopicConfig(addr, topicConfig);
                System.out.printf("create topic to %s success.%n", addr);
            }

            if (isOrder) {
                // Build "brokerA:n;brokerB:n" order configuration across the cluster.
                Set<String> brokerNameSet = CommandUtil.fetchBrokerNameByClusterName(defaultMQAdminExt, clusterName);
                StringBuilder orderConf = new StringBuilder();
                String splitor = "";
                for (String s : brokerNameSet) {
                    orderConf.append(splitor).append(s).append(":")
                        .append(topicConfig.getWriteQueueNums());
                    splitor = ";";
                }
                defaultMQAdminExt.createOrUpdateOrderConf(topicConfig.getTopicName(), orderConf.toString(), true);
                System.out.printf("set cluster orderConf. isOrder=%s, orderConf=[%s]%n", isOrder, orderConf);
            }

            System.out.printf("%s%n", topicConfig);
            return;
        }

        // Neither -b nor -c supplied: show usage.
        ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
// Parses a full option set through the command's own option definitions and
// verifies each raw value round-trips (note: values carry a leading space from
// the "-x value" form, hence the trim() calls).
@Test
public void testExecute() {
    UpdateTopicSubCommand cmd = new UpdateTopicSubCommand();
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    String[] subargs = new String[] {
        "-b 127.0.0.1:10911",
        "-t unit-test",
        "-r 8",
        "-w 8",
        "-p 6",
        "-o false",
        "-u false",
        "-s false"};
    final CommandLine commandLine =
        ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new DefaultParser());
    assertThat(commandLine.getOptionValue('b').trim()).isEqualTo("127.0.0.1:10911");
    assertThat(commandLine.getOptionValue('r').trim()).isEqualTo("8");
    assertThat(commandLine.getOptionValue('w').trim()).isEqualTo("8");
    assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
    assertThat(commandLine.getOptionValue('p').trim()).isEqualTo("6");
    assertThat(commandLine.getOptionValue('o').trim()).isEqualTo("false");
    assertThat(commandLine.getOptionValue('u').trim()).isEqualTo("false");
    assertThat(commandLine.getOptionValue('s').trim()).isEqualTo("false");
}
/**
 * Returns this task's type (e.g. MAP or REDUCE).
 */
public TaskType getTaskType() {
    return type;
}
// Every explicitly-constructed TaskID reports its type; the no-arg constructor
// must default to REDUCE.
@Test
public void testGetTaskType0args() {
    JobID jobId = new JobID("1234", 0);
    for (TaskType type : TaskType.values()) {
        TaskID taskId = new TaskID(jobId, type, 0);
        assertEquals("TaskID incorrectly reported its type", type, taskId.getTaskType());
    }
    TaskID taskId = new TaskID();
    assertEquals("TaskID of default type incorrectly reported its type", TaskType.REDUCE, taskId.getTaskType());
}
/**
 * Creates a stopwatch that starts immediately (delegates to StopWatch(true)).
 */
public StopWatch() {
    this(true);
}
// NOTE(review): timing-based assertion (taken > 150 after a 200ms sleep) is
// inherently flaky on loaded CI machines; the generous margin mitigates but
// does not eliminate that.
@Test
public void testStopWatch() throws Exception {
    StopWatch watch = new StopWatch();
    Thread.sleep(200);
    long taken = watch.stop();
    // stop() and taken() must agree on the elapsed time.
    assertEquals(taken, watch.taken());
    assertTrue("Should take approx 200 millis, was: " + taken, taken > 150);
}
/**
 * Returns the originally-supplied URL string, or null when no URL was configured.
 */
@Override
public String getUrl() {
    if (url == null) {
        return null;
    }
    return url.originalArgument();
}
// A git material configured with a null URL must return null rather than throwing.
@Test
void shouldHandleNullUrlAtTheTimeOfGitMaterialConfigCreation() {
    GitMaterialConfig config = git(null);
    assertNull(config.getUrl());
}
/**
 * Converts an arbitrary string into a sanitized class name: upper-cases the
 * first character and strips every non-alphanumeric character.
 *
 * @param input the raw name; must not be null
 * @return the sanitized name; empty string for empty input
 * @throws NullPointerException if input is null (unchanged from the original contract)
 */
public static String getSanitizedClassName(String input) {
    // Fix: the original called input.substring(0, 1) unconditionally, which threw
    // StringIndexOutOfBoundsException for an empty string. An empty name now
    // sanitizes to an empty string.
    if (input.isEmpty()) {
        return "";
    }
    String upperCasedInput = input.substring(0, 1).toUpperCase() + input.substring(1);
    return upperCasedInput.replaceAll("[^A-Za-z0-9]", "");
}
// Data-driven check: every original name in the fixture map must sanitize to
// its expected class name.
@Test
void getSanitizedClassName() {
    classNameMap.forEach((originalName, expectedName) ->
        assertThat(KiePMMLModelUtils.getSanitizedClassName(originalName)).isEqualTo(expectedName));
}
/**
 * Records a failure: schedules the next retry at now + current backoff, then
 * grows the backoff by the configured multiplier, capped at maximumInterval.
 */
public void trackFailure() {
    retryTime = TimeUtils.currentTime().plus(backoff);
    backoff = Duration.ofMillis((long) (backoff.toMillis() * params.multiplier));
    // Clamp so repeated failures never push the interval past the configured maximum.
    if (backoff.compareTo(params.maximumInterval) > 0)
        backoff = params.maximumInterval;
}
// Backoffs ordered in a priority queue: fewer failures => sooner retry time => polled first.
@Test
public void testInQueue() {
    PriorityQueue<ExponentialBackoff> queue = new PriorityQueue<>();
    ExponentialBackoff backoff1 = new ExponentialBackoff(params);
    backoff.trackFailure();
    backoff.trackFailure();
    backoff1.trackFailure();
    backoff1.trackFailure();
    backoff1.trackFailure();
    queue.offer(backoff);
    queue.offer(backoff1);
    assertEquals(queue.poll(), backoff); // The one with soonest retry time
    assertEquals(queue.peek(), backoff1);
    queue.offer(backoff);
    assertEquals(queue.poll(), backoff); // Still the same one
}
/**
 * Delegates the default parameter-by-type transformer to the underlying registry.
 */
@Override
public void setDefaultParameterTransformer(ParameterByTypeTransformer defaultParameterByTypeTransformer) {
    parameterTypeRegistry.setDefaultParameterTransformer(defaultParameterByTypeTransformer);
}
// Smoke test: setting a default transformer must be accepted without error.
// NOTE(review): nothing is asserted here — consider verifying the registry
// actually uses the supplied transformer.
@Test
void should_set_default_parameter_transformer() {
    ParameterByTypeTransformer expected = (fromValue, toValueType) -> null;
    registry.setDefaultParameterTransformer(expected);
}
/**
 * Shuts down the socket. The reference count is taken around the native call so
 * the fd cannot be closed concurrently; unreference(exc) uses the flag to know
 * whether the shutdown completed normally.
 *
 * @throws IOException if the native shutdown fails
 */
public void shutdown() throws IOException {
    refCount.reference();
    boolean exc = true;
    try {
        shutdown0(fd);
        exc = false;
    } finally {
        unreference(exc);
    }
}
// A blocked reader must observe EOF (read() == -1) after shutdown() on the peer,
// having consumed exactly the three bytes written beforehand.
// NOTE(review): the isAlive() assertion races with the reader draining the bytes;
// it holds in practice because the reader then blocks on the empty stream.
@Test(timeout=180000)
public void testShutdown() throws Exception {
    final AtomicInteger bytesRead = new AtomicInteger(0);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final DomainSocket[] socks = DomainSocket.socketpair();
    Runnable reader = new Runnable() {
        @Override
        public void run() {
            while (true) {
                try {
                    int ret = socks[1].getInputStream().read();
                    if (ret == -1) return;
                    bytesRead.addAndGet(1);
                } catch (IOException e) {
                    DomainSocket.LOG.error("reader error", e);
                    failed.set(true);
                    return;
                }
            }
        }
    };
    Thread readerThread = new Thread(reader);
    readerThread.start();
    socks[0].getOutputStream().write(1);
    socks[0].getOutputStream().write(2);
    socks[0].getOutputStream().write(3);
    Assert.assertTrue(readerThread.isAlive());
    socks[0].shutdown();
    readerThread.join();
    Assert.assertFalse(failed.get());
    Assert.assertEquals(3, bytesRead.get());
    IOUtils.cleanupWithLogger(null, socks);
}
@Override public CRFModel train(SequenceDataset<Label> sequenceExamples, Map<String, Provenance> runProvenance) { if (sequenceExamples.getOutputInfo().getUnknownCount() > 0) { throw new IllegalArgumentException("The supplied Dataset contained unknown Outputs, and this Trainer is supervised."); } // Creates a new RNG, adds one to the invocation count, generates a local optimiser. SplittableRandom localRNG; TrainerProvenance trainerProvenance; StochasticGradientOptimiser localOptimiser; synchronized(this) { localRNG = rng.split(); localOptimiser = optimiser.copy(); trainerProvenance = getProvenance(); trainInvocationCounter++; } ImmutableOutputInfo<Label> labelIDMap = sequenceExamples.getOutputIDInfo(); ImmutableFeatureMap featureIDMap = sequenceExamples.getFeatureIDMap(); SGDVector[][] sgdFeatures = new SGDVector[sequenceExamples.size()][]; int[][] sgdLabels = new int[sequenceExamples.size()][]; double[] weights = new double[sequenceExamples.size()]; int n = 0; for (SequenceExample<Label> example : sequenceExamples) { weights[n] = example.getWeight(); Pair<int[],SGDVector[]> pair = CRFModel.convertToVector(example,featureIDMap,labelIDMap); sgdFeatures[n] = pair.getB(); sgdLabels[n] = pair.getA(); n++; } logger.info(String.format("Training SGD CRF with %d examples", n)); CRFParameters crfParameters = new CRFParameters(featureIDMap.size(),labelIDMap.size()); localOptimiser.initialise(crfParameters); double loss = 0.0; int iteration = 0; for (int i = 0; i < epochs; i++) { if (shuffle) { Util.shuffleInPlace(sgdFeatures, sgdLabels, weights, localRNG); } if (minibatchSize == 1) { /* * Special case a minibatch of size 1. Directly updates the parameters after each * example rather than aggregating. 
*/ for (int j = 0; j < sgdFeatures.length; j++) { Pair<Double,Tensor[]> output = crfParameters.valueAndGradient(sgdFeatures[j],sgdLabels[j]); loss += output.getA()*weights[j]; //Update the gradient with the current learning rates Tensor[] updates = localOptimiser.step(output.getB(),weights[j]); //Apply the update to the current parameters. crfParameters.update(updates); iteration++; if ((iteration % loggingInterval == 0) && (loggingInterval != -1)) { logger.info("At iteration " + iteration + ", average loss = " + loss/loggingInterval); loss = 0.0; } } } else { Tensor[][] gradients = new Tensor[minibatchSize][]; for (int j = 0; j < sgdFeatures.length; j += minibatchSize) { double tempWeight = 0.0; int curSize = 0; //Aggregate the gradient updates for each example in the minibatch for (int k = j; k < j+minibatchSize && k < sgdFeatures.length; k++) { Pair<Double,Tensor[]> output = crfParameters.valueAndGradient(sgdFeatures[j],sgdLabels[j]); loss += output.getA()*weights[k]; tempWeight += weights[k]; gradients[k-j] = output.getB(); curSize++; } //Merge the values into a single gradient update Tensor[] updates = crfParameters.merge(gradients,curSize); for (Tensor update : updates) { update.scaleInPlace(minibatchSize); } tempWeight /= minibatchSize; //Update the gradient with the current learning rates updates = localOptimiser.step(updates,tempWeight); //Apply the gradient. 
crfParameters.update(updates); iteration++; if ((loggingInterval != -1) && (iteration % loggingInterval == 0)) { logger.info("At iteration " + iteration + ", average loss = " + loss/loggingInterval); loss = 0.0; } } } } localOptimiser.finalise(); //public CRFModel(String name, String description, ImmutableInfoMap featureIDMap, ImmutableInfoMap outputIDInfo, CRFParameters parameters) { ModelProvenance provenance = new ModelProvenance(CRFModel.class.getName(),OffsetDateTime.now(),sequenceExamples.getProvenance(),trainerProvenance,runProvenance); CRFModel model = new CRFModel("crf-sgd-model",provenance,featureIDMap,labelIDMap,crfParameters); localOptimiser.reset(); return model; }
// Predicting on an empty sequence example must be rejected with IllegalArgumentException.
@Test
public void testEmptyExample() {
    assertThrows(IllegalArgumentException.class, () -> {
        SequenceDataset<Label> p = SequenceDataGenerator.generateGorillaDataset(5);
        SequenceModel<Label> m = t.train(p);
        m.predict(SequenceDataGenerator.generateEmptyExample());
    });
}
/**
 * Finishes the unmanaged AM. If the UAM never registered: returns a dummy
 * "not unregistered" response when a launch is still in flight, otherwise
 * fails because finish-before-register is a caller error. On a successful
 * unregister, tears down the RM connections.
 *
 * @throws YarnException if called before createAndRegister
 * @throws IOException on RPC failure
 */
public FinishApplicationMasterResponse finishApplicationMaster(
        FinishApplicationMasterRequest request) throws YarnException, IOException {
    if (this.userUgi == null) {
        if (this.connectionInitiated) {
            // This is possible if the async launchUAM is still
            // blocked and retrying. Return a dummy response in this case.
            LOG.warn("Unmanaged AM still not successfully launched/registered yet."
                + " Stopping the UAM heartbeat thread anyways.");
            return FinishApplicationMasterResponse.newInstance(false);
        } else {
            throw new YarnException("finishApplicationMaster should not "
                + "be called before createAndRegister");
        }
    }
    FinishApplicationMasterResponse response =
        this.rmProxyRelayer.finishApplicationMaster(request);
    if (response.getIsUnregistered()) {
        shutDownConnections();
    }
    return response;
}
// Finishing before registering must fail (the UAM has no identity yet).
@Test(expected = Exception.class)
public void testFinishWithoutRegister()
        throws YarnException, IOException, InterruptedException {
    finishApplicationMaster(
        FinishApplicationMasterRequest.newInstance(null, null, null), attemptId);
}
/**
 * Parses an ISA-Tab archive: locates the investigation file (i_*.txt) next to
 * the study file, then emits investigation, study, and assay content as XHTML.
 * NOTE(review): this mutates the parser's location/studyFileName fields, so a
 * single instance is not safe for concurrent or repeated use across archives —
 * confirm callers create one parser per archive.
 */
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
        ParseContext context) throws IOException, SAXException, TikaException {
    // Only create (and later dispose) temp resources if the stream isn't already a TikaInputStream.
    TemporaryResources tmp = TikaInputStream.isTikaInputStream(stream) ? null : new TemporaryResources();
    TikaInputStream tis = TikaInputStream.get(stream, tmp, metadata);
    try {
        if (this.location == null) {
            this.location = tis.getFile().getParent() + File.separator;
        }
        this.studyFileName = tis.getFile().getName();
        File locationFile = new File(location);
        // Investigation files follow the ISA-Tab "i_*.txt" naming convention.
        String[] investigationList = locationFile.list((dir, name) -> name.matches("i_.+\\.txt"));
        XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
        xhtml.startDocument();
        parseInvestigation(investigationList, xhtml, metadata, context);
        parseStudy(stream, xhtml, metadata, context);
        parseAssay(xhtml, metadata, context);
        xhtml.endDocument();
    } finally {
        if (tmp != null) {
            tmp.dispose();
        }
    }
}
// End-to-end parse of the BII-I-1 sample archive: checks investigation metadata,
// publications, and contact fields extracted into the Metadata object.
@Test
public void testParseArchive() throws Exception {
    String path = "/test-documents/testISATab_BII-I-1/s_BII-S-1.txt";
    Parser parser = new ISArchiveParser(
        ISArchiveParserTest.class.getResource("/test-documents/testISATab_BII-I-1/").toURI()
            .getPath());
    //Parser parser = new AutoDetectParser();
    ContentHandler handler = new BodyContentHandler();
    Metadata metadata = new Metadata();
    ParseContext context = new ParseContext();
    try (InputStream stream = ISArchiveParserTest.class.getResourceAsStream(path)) {
        parser.parse(stream, handler, metadata, context);
    }
    // INVESTIGATION
    assertEquals("BII-I-1", metadata.get("Investigation Identifier"),
        "Invalid Investigation Identifier");
    assertEquals("Growth control of the eukaryote cell: a systems biology study in yeast",
        metadata.get("Investigation Title"), "Invalid Investigation Title");
    // INVESTIGATION PUBLICATIONS
    assertEquals("17439666", metadata.get("Investigation PubMed ID"),
        "Invalid Investigation PubMed ID");
    assertEquals("doi:10.1186/jbiol54", metadata.get("Investigation Publication DOI"),
        "Invalid Investigation Publication DOI");
    // INVESTIGATION CONTACTS
    assertEquals("Oliver", metadata.get("Investigation Person Last Name"),
        "Invalid Investigation Person Last Name");
    assertEquals("Stephen", metadata.get("Investigation Person First Name"),
        "Invalid Investigation Person First Name");
}
/**
 * Returns the metric for the given key, creating it on first access.
 * Safe under concurrency: racing callers may each build an instance, but
 * putIfAbsent ensures everyone converges on the single stored winner.
 */
public T get(K key) {
    T metric = metrics.get(key);
    if (metric == null) {
        metric = factory.createInstance(key);
        // putIfAbsent returns the previously-mapped value (a racing winner) or null
        // if our instance was stored; firstNonNull picks the canonical one either way.
        metric = MoreObjects.firstNonNull(metrics.putIfAbsent(key, metric), metric);
    }
    return metric;
}
// Repeated lookups of the same key must yield the SAME instance, not an equal copy.
@Test
public void testReuseInstances() {
    AtomicLong foo1 = metricsMap.get("foo");
    AtomicLong foo2 = metricsMap.get("foo");
    assertThat(foo1, sameInstance(foo2));
}
/**
 * Validates that the value fits the decimal precision/scale declared by the
 * Connect schema, delegating to the (precision, scale) overload.
 */
public static BigDecimal ensureFit(final BigDecimal value, final Schema schema) {
    return ensureFit(value, precision(schema), scale(schema));
}
// 12 cannot fit a DECIMAL(2,1) (only one integer digit available) and must
// overflow with a descriptive ArithmeticException.
@Test
public void shouldFailFitIfNotExactMatchMoreDigits() {
    // When:
    final Exception e = assertThrows(
        ArithmeticException.class,
        () -> ensureFit(new BigDecimal("12"), DECIMAL_SCHEMA)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Numeric field overflow: A field with precision 2 and " +
        "scale 1 must round to an absolute value less than 10^1. Got 12"));
}
/**
 * Computes the required Kafka version change by chaining: read the version from
 * the controller, list the pods, derive from/to versions, then prepare the change.
 */
public Future<KafkaVersionChange> reconcile() {
    return getVersionFromController()
        .compose(i -> getPods())
        .compose(this::detectToAndFromVersions)
        .compose(i -> prepareVersionChange());
}
// When every pod already runs the default versions, reconcile must be a no-op:
// from == to == default, and no protocol/format overrides are produced.
@Test
public void testNoopWithAllVersionsFromStsAndSps(VertxTestContext context) {
    String kafkaVersion = VERSIONS.defaultVersion().version();
    String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion();
    String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion();

    VersionChangeCreator vcc = mockVersionChangeCreator(
        mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
        mockNewCluster(
            mockSts("3.0.0"),
            mockSps(kafkaVersion),
            mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)
        )
    );

    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        assertThat(c.interBrokerProtocolVersion(), nullValue());
        assertThat(c.logMessageFormatVersion(), nullValue());
        assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
/**
 * Updates the account plus any additional write items in a single DynamoDB
 * transaction. On success the in-memory version is bumped to match the stored
 * one. A failed conditional check on the account item, or a transaction
 * conflict on any item, surfaces as ContestedOptimisticLockException so callers
 * can retry with fresh data.
 */
public CompletionStage<Void> updateTransactionallyAsync(final Account account,
    final Collection<TransactWriteItem> additionalWriteItems) {

    return AsyncTimerUtil.record(UPDATE_TRANSACTIONALLY_TIMER, () -> {
        // The account update is the first item; cancellation reason index 0 below relies on this.
        final List<TransactWriteItem> writeItems = new ArrayList<>(additionalWriteItems.size() + 1);
        writeItems.add(UpdateAccountSpec.forAccount(accountsTableName, account).transactItem());
        writeItems.addAll(additionalWriteItems);

        return asyncClient.transactWriteItems(TransactWriteItemsRequest.builder()
                .transactItems(writeItems)
                .build())
            .thenApply(response -> {
                // Keep the in-memory version in sync with the conditional write.
                account.setVersion(account.getVersion() + 1);
                return (Void) null;
            })
            .exceptionally(throwable -> {
                final Throwable unwrapped = ExceptionUtils.unwrap(throwable);
                if (unwrapped instanceof TransactionCanceledException transactionCanceledException) {
                    // Reason 0 corresponds to the account item's version condition.
                    if (CONDITIONAL_CHECK_FAILED.equals(transactionCanceledException.cancellationReasons().get(0).code())) {
                        throw new ContestedOptimisticLockException();
                    }

                    // A conflict on ANY item in the transaction also counts as contention.
                    if (transactionCanceledException.cancellationReasons()
                        .stream()
                        .anyMatch(reason -> TRANSACTION_CONFLICT.equals(reason.code()))) {
                        throw new ContestedOptimisticLockException();
                    }
                }

                throw CompletableFutureUtils.errorAsCompletionException(throwable);
            });
    });
}
@Test
void testUpdateTransactionallyWithMockTransactionConflictException() {
    // GIVEN an async client whose transactWriteItems always fails with a
    // TransactionCanceledException carrying a "TransactionConflict" cancellation reason.
    final DynamoDbAsyncClient dynamoDbAsyncClient = mock(DynamoDbAsyncClient.class);
    accounts = new Accounts(mock(DynamoDbClient.class),
        dynamoDbAsyncClient,
        Tables.ACCOUNTS.tableName(),
        Tables.NUMBERS.tableName(),
        Tables.PNI_ASSIGNMENTS.tableName(),
        Tables.USERNAMES.tableName(),
        Tables.DELETED_ACCOUNTS.tableName());

    when(dynamoDbAsyncClient.transactWriteItems(any(TransactWriteItemsRequest.class)))
        .thenReturn(CompletableFuture.failedFuture(TransactionCanceledException.builder()
            .cancellationReasons(CancellationReason.builder()
                .code("TransactionConflict")
                .build())
            .build()));

    Account account = generateAccount("+14151112222", UUID.randomUUID(), UUID.randomUUID());

    // THEN the conflict surfaces as a ContestedOptimisticLockException (wrapped by join()).
    assertThatThrownBy(() -> accounts.updateTransactionallyAsync(account, Collections.emptyList()).toCompletableFuture().join())
        .isInstanceOfAny(CompletionException.class)
        .hasCauseInstanceOf(ContestedOptimisticLockException.class);
}
@Override public String getSinkTableName(Table table) { String tableName = table.getName(); Map<String, String> sink = config.getSink(); // Add table name mapping logic String mappingRoute = sink.get(FlinkCDCConfig.TABLE_MAPPING_ROUTES); if (mappingRoute != null) { Map<String, String> mappingRules = parseMappingRoute(mappingRoute); if (mappingRules.containsKey(tableName)) { tableName = mappingRules.get(tableName); } } tableName = sink.getOrDefault(FlinkCDCConfig.TABLE_PREFIX, "") + tableName + sink.getOrDefault(FlinkCDCConfig.TABLE_SUFFIX, ""); // table.lower and table.upper can not be true at the same time if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER)) && Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) { throw new IllegalArgumentException("table.lower and table.upper can not be true at the same time"); } if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) { tableName = tableName.toUpperCase(); } if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))) { tableName = tableName.toLowerCase(); } // Implement regular expressions to replace table names through // sink.table.replace.pattern and table.replace.with String replacePattern = sink.get(FlinkCDCConfig.TABLE_REPLACE_PATTERN); String replaceWith = sink.get(FlinkCDCConfig.TABLE_REPLACE_WITH); if (replacePattern != null && replaceWith != null) { Pattern pattern = Pattern.compile(replacePattern); Matcher matcher = pattern.matcher(tableName); tableName = matcher.replaceAll(replaceWith); } // add schema if (Boolean.parseBoolean(sink.get("table.prefix.schema"))) { tableName = table.getSchema() + "_" + tableName; } return tableName; }
@Test
public void testGetSinkTableNameWithConversionLowerCase() {
    // GIVEN a sink configured for lower-casing only (no prefix/suffix, upper disabled).
    Map<String, String> sinkConfig = new HashMap<>();
    sinkConfig.put("table.prefix", "");
    sinkConfig.put("table.suffix", "");
    sinkConfig.put("table.lower", "true");
    sinkConfig.put("table.upper", "false");
    when(config.getSink()).thenReturn(sinkConfig);

    Table table = new Table("TestTable", "TestSchema", null);

    // THEN the mixed-case source name is converted to lower case.
    String expectedTableName = "testtable";
    Assert.assertEquals(expectedTableName, sinkBuilder.getSinkTableName(table));
}
/**
 * Builds a callback that finishes {@code span} on completion, optionally wrapping an
 * existing delegate so both run.
 */
static Callback create(@Nullable Callback delegate, Span span, CurrentTraceContext current) {
    return delegate == null
        ? new FinishSpan(span)
        : new DelegateAndFinishSpan(delegate, span, current);
}
@Test
void on_completion_should_finish_span() {
    // Given a started span wrapped by a tracing callback with no delegate...
    Span span = tracing.tracer().nextSpan().start();
    Callback callback = TracingCallback.create(null, span, currentTraceContext);

    // ...a successful completion must finish the span.
    callback.onCompletion(createRecordMetadata(), null);

    assertThat(spans.get(0).finishTimestamp()).isNotZero();
}
/**
 * Returns the {@link ConfigurationFactoryFactory} used to build configuration factories.
 */
public ConfigurationFactoryFactory<T> getConfigurationFactoryFactory() {
    return configurationFactoryFactory;
}
@Test
void defaultsToDefaultConfigurationFactoryFactory() throws Exception {
    // A freshly constructed bootstrap must fall back to the default factory implementation.
    assertThat(bootstrap.getConfigurationFactoryFactory())
        .isInstanceOf(DefaultConfigurationFactoryFactory.class);
}
/**
 * Reads a single byte from the mapped buffer.
 *
 * @return the next byte as an unsigned value (0-255), or -1 at end of stream
 * @throws IOException declared for interface compatibility
 */
@Override
public int read() throws IOException {
    // Mask to 0xff so the signed byte is reported as an unsigned int.
    return isEOF() ? -1 : (mappedByteBuffer.get() & 0xff);
}
@Test
void testUnmapping() throws IOException {
    // This is a special test case for some unmapping issues limited to Windows environments,
    // see https://bugs.openjdk.java.net/browse/JDK-4724038
    Path tempFile = Files.createTempFile("PDFBOX", "txt");
    try (BufferedWriter bufferedWriter = Files.newBufferedWriter(tempFile, StandardOpenOption.WRITE)) {
        bufferedWriter.write("Apache PDFBox test");
    }
    try (RandomAccessRead randomAccessSource = new RandomAccessReadMemoryMappedFile(
            tempFile.toFile())) {
        // 'A' == 65: the first byte of the content written above.
        assertEquals(65, randomAccessSource.read());
    }
    // On Windows, deleting the file fails if the memory mapping is still held.
    Files.delete(tempFile);
}
/**
 * Computes the age encoded in the given ID card number, relative to the current date.
 *
 * @param idcard the ID card number
 * @return the age in years
 */
public static int getAgeByIdCard(String idcard) {
    // Delegates to the two-argument overload using "now" as the reference date.
    return getAgeByIdCard(idcard, DateUtil.date());
}
@Test
public void getAgeByIdCardTest() {
    // Use a fixed reference date so the expected ages are deterministic.
    DateTime date = DateUtil.parse("2017-04-10");
    int age = IdcardUtil.getAgeByIdCard(ID_18, date);
    assertEquals(age, 38);
    assertEquals(IdcardUtil.getAgeByIdCard(FOREIGN_ID_18, date), 32);
    // 15-digit (legacy) card numbers are supported as well.
    int age2 = IdcardUtil.getAgeByIdCard(ID_15, date);
    assertEquals(age2, 28);
}
/**
 * Inspects every initialized bean's methods via the recurring-job finder callback, then
 * returns the bean unchanged.
 */
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
    // The callback visits each method of the bean class (behavior defined elsewhere).
    ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
    return bean;
}
@Test
void beansWithMethodsUsingJobContextAnnotatedWithRecurringIntervalAnnotationWillAutomaticallyBeRegistered() {
    // GIVEN
    final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();

    // WHEN a bean exposing an interval-annotated method taking a JobContext is processed
    recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringIntervalJobUsingJobContext(), "not important");

    // THEN the job is scheduled under its id with a 10-minute interval
    verify(jobScheduler).scheduleRecurrently(eq("my-recurring-job"), jobDetailsArgumentCaptor.capture(), eq(new Interval("PT10M")), any(ZoneId.class));

    // ...and the captured job details point at the annotated method with a JobContext arg.
    final JobDetails actualJobDetails = jobDetailsArgumentCaptor.getValue();
    assertThat(actualJobDetails)
        .isCacheable()
        .hasClassName(MyServiceWithRecurringIntervalJobUsingJobContext.class.getName())
        .hasMethodName("myRecurringMethod")
        .hasJobContextArg();
}
/**
 * Value equality over all configuration fields. Uses an exact class check (not
 * instanceof) so subclasses are never equal to this type.
 */
@Override
public boolean equals(final Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    final NacosACMConfig other = (NacosACMConfig) o;
    return enabled == other.enabled
            && Objects.equals(endpoint, other.endpoint)
            && Objects.equals(namespace, other.namespace)
            && Objects.equals(accessKey, other.accessKey)
            && Objects.equals(secretKey, other.secretKey);
}
@Test
public void testEquals() {
    // Reflexive: an instance equals itself.
    assertEquals(nacosACMConfig, nacosACMConfig);
    // Equal to an identically-configured instance.
    assertEquals(nacosACMConfig, that);
    // Never equal to null or to an unrelated type.
    assertNotEquals(null, nacosACMConfig);
    assertNotEquals(nacosACMConfig, new Object());
}
/**
 * Merges this metadata with another, with this object winning on conflicts.
 * <p>
 * Fields are de-duplicated by name via a {@code TreeSet}: because this object's fields
 * are streamed first and {@code TreeSet.add} ignores duplicates, this object's field wins
 * on a name clash; the result list is sorted by field name. Options are merged with
 * {@code putIfAbsent}, so this object's options also take precedence, preserving
 * insertion order in a {@code LinkedHashMap}.
 */
Metadata merge(Metadata other) {
    List<MappingField> fields = new ArrayList<>(concat(this.fields.stream(), other.fields.stream())
            .collect(toCollection(() -> new TreeSet<>(Comparator.comparing(MappingField::name)))));

    Map<String, String> options = concat(this.options.entrySet().stream(), other.options.entrySet().stream())
            .collect(LinkedHashMap::new, (map, entry) -> map.putIfAbsent(entry.getKey(), entry.getValue()), Map::putAll);

    return new Metadata(fields, options);
}
@Test
public void test_merge() {
    // First metadata: two key-mapped fields and two options.
    Metadata first = new Metadata(
            asList(
                    new MappingField("field1", QueryDataType.INT, "__key.field1"),
                    new MappingField("field2", QueryDataType.INT, "__key.field2")
            ),
            ImmutableMap.of("key1", "1", "key2", "2")
    );
    // Second metadata overlaps on "field2" and "key2" and adds "field3"/"key3".
    Metadata second = new Metadata(
            asList(
                    new MappingField("field2", QueryDataType.VARCHAR, "this.field2"),
                    new MappingField("field3", QueryDataType.VARCHAR, "this.field3")
            ),
            ImmutableMap.of("key2", "two", "key3", "three")
    );

    Metadata merged = first.merge(second);

    // On conflicts ("field2", "key2") the FIRST metadata's entries win.
    assertThat(merged).isEqualToComparingFieldByField(new Metadata(
            asList(
                    new MappingField("field1", QueryDataType.INT, "__key.field1"),
                    new MappingField("field2", QueryDataType.INT, "__key.field2"),
                    new MappingField("field3", QueryDataType.VARCHAR, "this.field3")
            ),
            ImmutableMap.of("key1", "1", "key2", "2", "key3", "three")
    ));
}
/**
 * Returns the smallest power of two greater than or equal to {@code value}; all values
 * less than or equal to 1 (including negatives) map to 1.
 *
 * @param value must be greater than Integer.MIN_VALUE and below 2^30 (assert-checked)
 * @return the next positive power of two
 */
public static int findNextPositivePowerOfTwo(final int value) {
    assert value > Integer.MIN_VALUE && value < 0x40000000;
    // numberOfLeadingZeros(value - 1) locates the bit just above the highest set bit of
    // value - 1, which is exactly the exponent of the next power of two. For value <= 0
    // the shift amount is 32, and (1 << 32) wraps to 1 under Java's shift semantics.
    final int shift = 32 - Integer.numberOfLeadingZeros(value - 1);
    return 1 << shift;
}
@Test
public void testFindNextPositivePowerOfTwo() {
    // Values <= 1 round up to the smallest positive power of two: 1.
    assertEquals(1, findNextPositivePowerOfTwo(0));
    assertEquals(1, findNextPositivePowerOfTwo(1));
    assertEquals(1024, findNextPositivePowerOfTwo(1000));
    assertEquals(1024, findNextPositivePowerOfTwo(1023));
    // Exact powers of two are returned unchanged.
    assertEquals(2048, findNextPositivePowerOfTwo(2048));
    assertEquals(1 << 30, findNextPositivePowerOfTwo((1 << 30) - 1));
    // Negative inputs also collapse to 1.
    assertEquals(1, findNextPositivePowerOfTwo(-1));
    assertEquals(1, findNextPositivePowerOfTwo(-10000));
}
/**
 * Decides whether the HTTP caller is allowed to fetch edit logs.
 * <p>
 * Accepted requestors: any configured NameNode principal, the SecondaryNameNode
 * principal (added best-effort), or any principal whose short name matches this
 * process's login user (to allow other JournalNodes during recovery).
 *
 * @param request the incoming HTTP request, used to derive the caller's UGI
 * @param conf    configuration holding the valid principal names
 * @return true if the caller may fetch edit logs
 * @throws IOException if the caller's UGI cannot be determined
 */
protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
    throws IOException {
  UserGroupInformation ugi = getUGI(request, conf);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Validating request made by " + ugi.getUserName() +
        " / " + ugi.getShortUserName() + ". This user is: " +
        UserGroupInformation.getLoginUser());
  }

  Set<String> validRequestors = new HashSet<String>();
  validRequestors.addAll(DFSUtil.getAllNnPrincipals(conf));
  try {
    validRequestors.add(
        SecurityUtil.getServerPrincipal(conf
            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            SecondaryNameNode.getHttpAddress(conf).getHostName()));
  } catch (Exception e) {
    // Don't halt if SecondaryNameNode principal could not be added.
    LOG.debug("SecondaryNameNode principal could not be added", e);
    String msg = String.format(
        "SecondaryNameNode principal not considered, %s = %s, %s = %s",
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
    LOG.warn(msg);
  }

  // Check the full principal name of all the configured valid requestors.
  for (String v : validRequestors) {
    if (LOG.isDebugEnabled())
      LOG.debug("isValidRequestor is comparing to valid requestor: " + v);
    if (v != null && v.equals(ugi.getUserName())) {
      if (LOG.isDebugEnabled())
        LOG.debug("isValidRequestor is allowing: " + ugi.getUserName());
      return true;
    }
  }

  // Additionally, we compare the short name of the requestor to this JN's
  // username, because we want to allow requests from other JNs during
  // recovery, but we can't enumerate the full list of JNs.
  if (ugi.getShortUserName().equals(
      UserGroupInformation.getLoginUser().getShortUserName())) {
    if (LOG.isDebugEnabled())
      LOG.debug("isValidRequestor is allowing other JN principal: " +
          ugi.getUserName());
    return true;
  }

  if (LOG.isDebugEnabled())
    LOG.debug("isValidRequestor is rejecting: " + ugi.getUserName());
  return false;
}
@Test
public void testRequestShortName() throws IOException {
    // Test: Make a request from a namenode
    // NOTE(review): the "jn/..." principal is not in the configured requestor list, so this
    // exercises the short-name fallback — presumably the fixture's login user also has
    // short name "jn"; confirm against SERVLET/CONF setup.
    HttpServletRequest request = mock(HttpServletRequest.class);
    when(request.getParameter(UserParam.NAME)).thenReturn("jn/localhost@REALM.TLD");

    boolean isValid = SERVLET.isValidRequestor(request, CONF);

    assertThat(isValid).isTrue();
}
/**
 * Resolves the LDAP groups the given user belongs to and maps them to role names.
 * <p>
 * Two strategies: an LDAP_MATCHING_RULE_IN_CHAIN search (nested membership), or a plain
 * group search with an optional configured filter. Results are paged. Role and group
 * names are stored in the session under SUBJECT_USER_ROLES / SUBJECT_USER_GROUPS, and
 * group names are appended to a mutable principal collection when one is provided.
 *
 * @return the set of resolved role names for the user
 * @throws NamingException on LDAP errors other than partial results / size limits
 */
protected Set<String> rolesFor(PrincipalCollection principals, String userNameIn,
    final LdapContext ldapCtx, final LdapContextFactory ldapContextFactory, Session session)
    throws NamingException {
  final Set<String> roleNames = new HashSet<>();
  final Set<String> groupNames = new HashSet<>();

  // Optionally normalize the user name to lower case before building the DN.
  final String userName;
  if (getUserLowerCase()) {
    LOGGER.debug("userLowerCase true");
    userName = userNameIn.toLowerCase();
  } else {
    userName = userNameIn;
  }

  String userDn = getUserDnForSearch(userName);

  // Activate paged results
  int pageSize = getPagingSize();
  LOGGER.debug("Ldap PagingSize: {}", pageSize);
  int numResults = 0;

  try {
    ldapCtx.addToEnvironment(Context.REFERRAL, "ignore");
    ldapCtx.setRequestControls(new Control[]{new PagedResultsControl(pageSize,
        Control.NONCRITICAL)});

    // ldapsearch -h localhost -p 33389 -D
    // uid=guest,ou=people,dc=hadoop,dc=apache,dc=org -w guest-password
    // -b dc=hadoop,dc=apache,dc=org -s sub '(objectclass=*)'
    NamingEnumeration<SearchResult> searchResultEnum = null;
    SearchControls searchControls = getGroupSearchControls();
    try {
      if (groupSearchEnableMatchingRuleInChain) {
        // Nested-membership search: the matching-rule-in-chain filter returns the
        // groups the user belongs to, and each group is mapped to a role name
        // (falling back to the raw group name when no mapping exists).
        searchResultEnum = ldapCtx.search(
            getGroupSearchBase(),
            String.format(
                MATCHING_RULE_IN_CHAIN_FORMAT, groupObjectClass, memberAttribute, userDn),
            searchControls);
        while (searchResultEnum != null && searchResultEnum.hasMore()) {
          // searchResults contains all the groups in search scope
          numResults++;
          final SearchResult group = searchResultEnum.next();
          Attribute attribute = group.getAttributes().get(getGroupIdAttribute());
          String groupName = attribute.get().toString();

          String roleName = roleNameFor(groupName);
          if (roleName != null) {
            roleNames.add(roleName);
          } else {
            roleNames.add(groupName);
          }
        }
      } else {
        // Default group search filter
        String searchFilter = String.format("(objectclass=%1$s)", groupObjectClass);
        // If group search filter is defined in Shiro config, then use it
        if (groupSearchFilter != null) {
          searchFilter = expandTemplate(groupSearchFilter, userName);
        }
        LOGGER.debug("Group SearchBase|SearchFilter|GroupSearchScope: " + "{}|{}|{}",
            getGroupSearchBase(), searchFilter, groupSearchScope);
        searchResultEnum = ldapCtx.search(
            getGroupSearchBase(),
            searchFilter,
            searchControls);
        while (searchResultEnum != null && searchResultEnum.hasMore()) {
          // searchResults contains all the groups in search scope
          numResults++;
          final SearchResult group = searchResultEnum.next();
          addRoleIfMember(userDn, group, roleNames, groupNames, ldapContextFactory);
        }
      }
    } catch (PartialResultException e) {
      // Referral chasing is disabled above; partial results are expected and ignored.
      LOGGER.debug("Ignoring PartitalResultException");
    } finally {
      if (searchResultEnum != null) {
        searchResultEnum.close();
      }
    }
    // Re-activate paged results
    ldapCtx.setRequestControls(new Control[]{new PagedResultsControl(pageSize,
        null, Control.CRITICAL)});
  } catch (SizeLimitExceededException e) {
    LOGGER.info("Only retrieved first {} groups due to SizeLimitExceededException.", numResults);
  } catch (IOException e) {
    LOGGER.error("Unabled to setup paged results");
  }

  // save role names and group names in session so that they can be
  // easily looked up outside of this object
  session.setAttribute(SUBJECT_USER_ROLES, roleNames);
  session.setAttribute(SUBJECT_USER_GROUPS, groupNames);
  if (!groupNames.isEmpty() && (principals instanceof MutablePrincipalCollection)) {
    ((MutablePrincipalCollection) principals).addAll(groupNames, getName());
  }
  LOGGER.debug("User RoleNames: {}::{}", userName, roleNames);
  return roleNames;
}
@Test
void testRolesFor() throws NamingException {
    LdapRealm realm = new LdapRealm();
    realm.setGroupSearchBase("cn=groups,dc=apache");
    realm.setGroupObjectClass("posixGroup");
    realm.setMemberAttributeValueTemplate("cn={0},ou=people,dc=apache");
    // Map "group-three" to a custom role name; other groups keep their own name.
    HashMap<String, String> rolesByGroups = new HashMap<>();
    rolesByGroups.put("group-three", "zeppelin-role");
    realm.setRolesByGroup(rolesByGroups);

    LdapContextFactory ldapContextFactory = mock(LdapContextFactory.class);
    LdapContext ldapCtx = mock(LdapContext.class);
    Session session = mock(Session.class);

    // expected search results
    BasicAttributes group1 = new BasicAttributes();
    group1.put(realm.getGroupIdAttribute(), "group-one");
    group1.put(realm.getMemberAttribute(), "principal");

    // user doesn't belong to this group
    BasicAttributes group2 = new BasicAttributes();
    group2.put(realm.getGroupIdAttribute(), "group-two");
    group2.put(realm.getMemberAttribute(), "someoneelse");

    // mapped to a different Zeppelin role
    BasicAttributes group3 = new BasicAttributes();
    group3.put(realm.getGroupIdAttribute(), "group-three");
    group3.put(realm.getMemberAttribute(), "principal");

    NamingEnumeration<SearchResult> results = enumerationOf(group1, group2, group3);
    when(ldapCtx.search(any(String.class), any(String.class), any(SearchControls.class)))
        .thenReturn(results);

    Set<String> roles = realm.rolesFor(
        new SimplePrincipalCollection("principal", "ldapRealm"),
        "principal", ldapCtx, ldapContextFactory, session);

    // The default (non-matching-rule-in-chain) search path must be used.
    verify(ldapCtx).search("cn=groups,dc=apache", "(objectclass=posixGroup)",
        realm.getGroupSearchControls());

    // group-one keeps its name; group-three maps to zeppelin-role; group-two is excluded.
    assertEquals(new HashSet<>(Arrays.asList("group-one", "zeppelin-role")), roles);
}
/**
 * Returns the SQL clause for skipping locked rows, or an empty string when the dialect
 * does not support it.
 */
@Override
public String selectForUpdateSkipLocked() {
    if (supportsSelectForUpdateSkipLocked) {
        return " FOR UPDATE SKIP LOCKED";
    }
    return "";
}
@Test
void mySQL5DoesNotSupportSelectForUpdateSkipLocked() {
    // A MySQL 5.x dialect must yield an empty clause rather than SKIP LOCKED syntax.
    assertThat(new MySqlDialect("MySQL", "5.8").selectForUpdateSkipLocked()).isEmpty();
}
/**
 * Builds a {@link UserIdentity} from the provider's user payload.
 *
 * @param gsonUser   the provider user (id, login, name source)
 * @param gsonEmails optional email payload; when present its primary email is used
 * @return the assembled identity
 */
public UserIdentity create(GsonUser gsonUser, @Nullable GsonEmails gsonEmails) {
    // Mandatory provider attributes first.
    UserIdentity.Builder identity = UserIdentity.builder()
        .setProviderId(gsonUser.getUuid())
        .setProviderLogin(gsonUser.getUsername())
        .setName(generateName(gsonUser));
    // Email is optional and only filled in when the provider returned one.
    if (gsonEmails != null) {
        identity.setEmail(gsonEmails.extractPrimaryEmail());
    }
    return identity.build();
}
@Test
public void empty_name_is_replaced_by_provider_login() {
    // NOTE(review): assumes GsonUser(login, name, uuid) constructor ordering — an empty
    // name must fall back to the login ("john"); confirm against GsonUser.
    GsonUser gson = new GsonUser("john", "", "ABCD");
    UserIdentity identity = underTest.create(gson, null);
    assertThat(identity.getName()).isEqualTo("john");
}
/**
 * Walks the cause chain of {@code t} and returns the first throwable matching
 * {@code condition}, or an empty Optional if none matches or {@code t} is null.
 * A seen-set guards against cycles in the cause chain.
 */
public static Optional<Throwable> findThrowableInChain(Predicate<Throwable> condition,
                                                       @Nullable Throwable t) {
    final Set<Throwable> seen = new HashSet<>();
    // Set.add returns false once a cause cycle revisits a throwable, ending the walk.
    for (Throwable current = t; current != null && seen.add(current); current = current.getCause()) {
        if (condition.test(current)) {
            return Optional.of(current);
        }
    }
    return Optional.empty();
}
@Test
void findsChainedException() {
    // Build a three-deep cause chain: third -> second -> first.
    final RuntimeException first = new RuntimeException("first");
    final RuntimeException second = new RuntimeException("second", first);
    final RuntimeException third = new RuntimeException("third", second);

    // Each level is found by matching its message, starting from the outermost throwable.
    assertThat(findThrowableInChain(t -> "third".equals(t.getMessage()), third)).contains(third);
    assertThat(findThrowableInChain(t -> "second".equals(t.getMessage()), third)).contains(second);
    assertThat(findThrowableInChain(t -> "first".equals(t.getMessage()), third)).contains(first);
    // A predicate that never matches yields an empty Optional.
    assertThat(findThrowableInChain(t -> false, third)).isEmpty();
}
/**
 * Distributes {@code getWorkBudget} across the given budget owners.
 * <p>
 * No-ops when there are no owners or the budget is zero. Desired per-owner budgets come
 * from {@code computeDesiredBudgets}; an owner is only adjusted when its remaining budget
 * is below fifty percent of its desired target, in which case the difference
 * (desired - remaining) is applied.
 */
@Override
public <T extends GetWorkBudgetSpender> void distributeBudget(
    ImmutableCollection<T> budgetOwners, GetWorkBudget getWorkBudget) {
  if (budgetOwners.isEmpty()) {
    LOG.debug("Cannot distribute budget to no owners.");
    return;
  }

  if (getWorkBudget.equals(GetWorkBudget.noBudget())) {
    LOG.debug("Cannot distribute 0 budget.");
    return;
  }

  Map<T, GetWorkBudget> desiredBudgets = computeDesiredBudgets(budgetOwners, getWorkBudget);

  for (Entry<T, GetWorkBudget> streamAndDesiredBudget : desiredBudgets.entrySet()) {
    GetWorkBudgetSpender getWorkBudgetSpender = streamAndDesiredBudget.getKey();
    GetWorkBudget desired = streamAndDesiredBudget.getValue();
    GetWorkBudget remaining = getWorkBudgetSpender.remainingBudget();
    // Threshold check avoids adjusting owners that still have most of their budget.
    if (isBelowFiftyPercentOfTarget(remaining, desired)) {
      GetWorkBudget adjustment = desired.subtract(remaining);
      getWorkBudgetSpender.adjustBudget(adjustment);
    }
  }
}
@Test
public void testDistributeBudget_adjustsStreamBudgetWhenRemainingItemBudgetTooLowWithNoActiveWork() {
    // Remaining items (1) are below half of the desired target (10), so the distributor
    // must adjust by the difference on both dimensions (items: 9, bytes: 0).
    GetWorkBudget streamRemainingBudget =
        GetWorkBudget.builder().setItems(1L).setBytes(10L).build();
    GetWorkBudget totalGetWorkBudget = GetWorkBudget.builder().setItems(10L).setBytes(10L).build();
    GetWorkBudgetSpender getWorkBudgetSpender =
        spy(createGetWorkBudgetOwnerWithRemainingBudgetOf(streamRemainingBudget));

    createBudgetDistributor(0L)
        .distributeBudget(ImmutableList.of(getWorkBudgetSpender), totalGetWorkBudget);

    verify(getWorkBudgetSpender, times(1))
        .adjustBudget(
            eq(totalGetWorkBudget.items() - streamRemainingBudget.items()),
            eq(totalGetWorkBudget.bytes() - streamRemainingBudget.bytes()));
}
/**
 * Returns whether {@code path} is known to have been absent in the UFS since
 * {@code absentSince}.
 * <p>
 * Walks from the given path up to (but not including) the base of its mount point,
 * returning true as soon as a matching cache entry is found. Paths outside any mount
 * are never considered absent.
 */
@Override
public boolean isAbsentSince(AlluxioURI path, long absentSince) {
  MountInfo mountInfo = getMountInfo(path);
  if (mountInfo == null) {
    return false;
  }
  AlluxioURI mountBaseUri = mountInfo.getAlluxioUri();

  while (path != null && !path.equals(mountBaseUri)) {
    // NOTE(review): pair semantics inferred from usage — first appears to be the time
    // the path was observed absent, second the owning mount id; confirm at the cache
    // writer. A hit counts only if it is recent enough AND belongs to the current mount
    // (stale entries from a remounted path are ignored).
    Pair<Long, Long> cacheResult = mCache.getIfPresent(path.getPath());
    if (cacheResult != null && cacheResult.getFirst() != null
        && cacheResult.getSecond() != null
        && cacheResult.getFirst() >= absentSince
        && cacheResult.getSecond() == mountInfo.getMountId()) {
      return true;
    }
    path = path.getParent();
  }
  // Reached the root, without finding anything in the cache.
  return false;
}
@Test
public void removeMountPoint() throws Exception {
    // After an unmount + remount (with a new mount id), previously cached absent paths
    // must no longer be reported absent — the cached mount id no longer matches.
    String ufsBase = "/a/b";
    String alluxioBase = "/mnt" + ufsBase;
    // Create ufs directories
    assertTrue((new File(mLocalUfsPath + ufsBase)).mkdirs());

    // 'base + /c' will be the first absent path
    process(new AlluxioURI(alluxioBase + "/c/d"));
    checkPaths(new AlluxioURI(alluxioBase + "/c"));

    // Unmount
    assertTrue(
        mMountTable.delete(NoopJournalContext.INSTANCE, new AlluxioURI("/mnt"), true));

    // Re-mount the same ufs
    long newMountId = IdUtils.getRandomNonNegativeLong();
    MountPOptions options = MountContext.defaults().getOptions().build();
    mUfsManager.addMount(newMountId, new AlluxioURI(mLocalUfsPath),
        new UnderFileSystemConfiguration(Configuration.global(), options.getReadOnly())
            .createMountSpecificConf(Collections.<String, String>emptyMap()));
    mMountTable.add(NoopJournalContext.INSTANCE, new AlluxioURI("/mnt"),
        new AlluxioURI(mLocalUfsPath), newMountId, options);

    // The cache should not contain any paths now.
    assertFalse(mUfsAbsentPathCache.isAbsentSince(new AlluxioURI("/mnt/a/b/c/d"),
        UfsAbsentPathCache.ALWAYS));
    assertFalse(mUfsAbsentPathCache.isAbsentSince(new AlluxioURI("/mnt/a/b/c"),
        UfsAbsentPathCache.ALWAYS));
    assertFalse(mUfsAbsentPathCache.isAbsentSince(new AlluxioURI("/mnt/a/b"),
        UfsAbsentPathCache.ALWAYS));
    assertFalse(mUfsAbsentPathCache.isAbsentSince(new AlluxioURI("/mnt/a"),
        UfsAbsentPathCache.ALWAYS));
    assertFalse(mUfsAbsentPathCache.isAbsentSince(new AlluxioURI("/mnt/"),
        UfsAbsentPathCache.ALWAYS));
}
/**
 * Classifies a throwable by reading its {@link ThrowableAnnotation}; unannotated
 * throwables default to {@code ThrowableType.RecoverableError}.
 */
public static ThrowableType getThrowableType(Throwable cause) {
    final ThrowableAnnotation annotation =
            cause.getClass().getAnnotation(ThrowableAnnotation.class);
    if (annotation != null) {
        return annotation.value();
    }
    return ThrowableType.RecoverableError;
}
@Test
void testThrowableType_PartitionDataMissingError() {
    // The classifier must read the type from the exception class's @ThrowableAnnotation
    // (TestPartitionDataMissingErrorException is presumably annotated accordingly).
    assertThat(
        ThrowableClassifier.getThrowableType(
            new TestPartitionDataMissingErrorException()))
        .isEqualTo(ThrowableType.PartitionDataMissingError);
}
/**
 * Fetches a Bitbucket repository via
 * {@code /rest/api/1.0/projects/{project}/repos/{repoSlug}}.
 *
 * @param serverUrl base URL of the Bitbucket server
 * @param token     personal access token used for authentication
 * @param project   project key
 * @param repoSlug  repository slug
 * @return the deserialized repository
 */
public Repository getRepo(String serverUrl, String token, String project, String repoSlug) {
    String path = format("/rest/api/1.0/projects/%s/repos/%s", project, repoSlug);
    HttpUrl url = buildUrl(serverUrl, path);
    return doGet(token, url, body -> buildGson().fromJson(body, Repository.class));
}
@Test
public void error_handling() {
    // A 400 response carrying a structured Bitbucket error payload must be translated into
    // an IllegalArgumentException with a generic message (the raw error is not leaked).
    server.enqueue(new MockResponse()
        .setHeader("Content-Type", "application/json;charset=UTF-8")
        .setResponseCode(400)
        .setBody("{\n" +
            " \"errors\": [\n" +
            " {\n" +
            " \"context\": null,\n" +
            " \"message\": \"Bad message\",\n" +
            " \"exceptionName\": \"com.atlassian.bitbucket.auth.BadException\"\n" +
            " }\n" +
            " ]\n" +
            "}"));

    String serverUrl = server.url("/").toString();
    assertThatThrownBy(() -> underTest.getRepo(serverUrl, "token", "", ""))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Unable to contact Bitbucket server");
}
/**
 * Completes the given unsigned {@link SendRequest}: selects spendable outputs to cover
 * the requested value plus fees, adds a change output when needed, optionally shuffles
 * outputs, signs the inputs and labels the transaction as a self-created user payment.
 *
 * @param req the request to complete; marked {@code completed} on success
 * @throws InsufficientMoneyException if the wallet cannot cover value + fee
 * @throws CompletionException for dusty sends, multiple OP_RETURN outputs, failed
 *         downward fee adjustment when emptying, or exceeding the max standard tx size
 */
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
    lock.lock();
    try {
        checkArgument(!req.completed, () -> "given SendRequest has already been completed");
        log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
                req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());

        // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
        // with the actual outputs that'll be used to gather the required amount of value. In this way, users
        // can customize coin selection policies. The call below will ignore immature coinbases and outputs
        // we don't have the keys for.
        List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);

        // Connect (add a value amount) unconnected inputs
        List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
        req.tx.clearInputs();
        inputs.forEach(req.tx::addInput);

        // Warn if there are remaining unconnected inputs whose value we do not know
        // TODO: Consider throwing if there are inputs that we don't have a value for
        if (req.tx.getInputs().stream()
                .map(TransactionInput::getValue)
                .anyMatch(Objects::isNull))
            log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");

        // If any inputs have already been added, we don't need to get their value from wallet
        Coin totalInput = req.tx.getInputSum();
        // Calculate the amount of value we need to import.
        Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);

        // Enforce the OP_RETURN limit
        if (req.tx.getOutputs().stream()
                .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
                .count() > 1) // Only 1 OP_RETURN per transaction allowed.
            throw new MultipleOpReturnRequested();

        // Check for dusty sends
        if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
            if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
                throw new DustySendRequested();
        }

        // Filter out candidates that are already included in the transaction inputs
        // NOTE(review): the predicate KEEPS candidates for which alreadyIncluded(...) is
        // true, which reads inverted relative to the comment above — verify the helper's
        // semantics (it may return true for NOT-yet-included outputs).
        List<TransactionOutput> candidates = prelimCandidates.stream()
                .filter(output -> alreadyIncluded(req.tx.getInputs(), output))
                .collect(StreamUtils.toUnmodifiableList());

        CoinSelection bestCoinSelection;
        TransactionOutput bestChangeOutput = null;
        List<Coin> updatedOutputValues = null;
        if (!req.emptyWallet) {
            // This can throw InsufficientMoneyException.
            FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
            bestCoinSelection = feeCalculation.bestCoinSelection;
            bestChangeOutput = feeCalculation.bestChangeOutput;
            updatedOutputValues = feeCalculation.updatedOutputValues;
        } else {
            // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
            // of the total value we can currently spend as determined by the selector, and then subtracting the fee.
            checkState(req.tx.getOutputs().size() == 1, () -> "empty wallet TX must have a single output only");
            CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
            bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
            candidates = null; // Selector took ownership and might have changed candidates. Don't access again.
            req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
            log.info(" emptying {}", bestCoinSelection.totalValue().toFriendlyString());
        }

        bestCoinSelection.outputs()
                .forEach(req.tx::addInput);

        if (req.emptyWallet) {
            if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
                throw new CouldNotAdjustDownwards();
        }

        if (updatedOutputValues != null) {
            for (int i = 0; i < updatedOutputValues.size(); i++) {
                req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
            }
        }

        if (bestChangeOutput != null) {
            req.tx.addOutput(bestChangeOutput);
            log.info(" with {} change", bestChangeOutput.getValue().toFriendlyString());
        }

        // Now shuffle the outputs to obfuscate which is the change.
        if (req.shuffleOutputs)
            req.tx.shuffleOutputs();

        // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
        if (req.signInputs)
            signTransaction(req);

        // Check size.
        final int size = req.tx.messageSize();
        if (size > Transaction.MAX_STANDARD_TX_SIZE)
            throw new ExceededMaxTransactionSize();

        // Label the transaction as being self created. We can use this later to spend its change output even before
        // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
        // point - the user isn't interested in a confidence transition they made themselves.
        getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
        // Label the transaction as being a user requested payment. This can be used to render GUI wallet
        // transaction lists more appropriately, especially when the wallet starts to generate transactions itself
        // for internal purposes.
        req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
        // Record the exchange rate that was valid when the transaction was completed.
        req.tx.setExchangeRate(req.exchangeRate);
        req.tx.setMemo(req.memo);
        req.completed = true;
        log.info(" completed: {}", req.tx);
    } finally {
        lock.unlock();
    }
}
@Test
public void recipientPaysFees() throws Exception {
    sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, COIN);

    // Simplest recipientPaysFees use case
    Coin valueToSend = CENT.divide(2);
    SendRequest request = SendRequest.to(OTHER_ADDRESS, valueToSend);
    request.feePerKb = Transaction.DEFAULT_TX_FEE;
    request.ensureMinRequiredFee = true;
    request.recipientsPayFees = true;
    request.shuffleOutputs = false;
    wallet.completeTx(request);
    // Hardcoded tx length because actual length may vary depending on actual signature length
    Coin fee = request.feePerKb.multiply(227).divide(1000);
    assertEquals(fee, request.tx.getFee());
    Transaction spend = request.tx;
    assertEquals(2, spend.getOutputs().size());
    // The single recipient output is reduced by the whole fee; change stays untouched.
    assertEquals(valueToSend.subtract(fee), spend.getOutput(0).getValue());
    assertEquals(COIN.subtract(valueToSend), spend.getOutput(1).getValue());
    assertEquals(1, spend.getInputs().size());
    assertEquals(COIN, spend.getInput(0).getValue());

    // Fee is split between the 2 outputs
    SendRequest request2 = SendRequest.to(OTHER_ADDRESS, valueToSend);
    request2.tx.addOutput(valueToSend, OTHER_ADDRESS);
    request2.feePerKb = Transaction.DEFAULT_TX_FEE;
    request2.ensureMinRequiredFee = true;
    request2.recipientsPayFees = true;
    request2.shuffleOutputs = false;
    wallet.completeTx(request2);
    // Hardcoded tx length because actual length may vary depending on actual signature length
    Coin fee2 = request2.feePerKb.multiply(261).divide(1000);
    assertEquals(fee2, request2.tx.getFee());
    Transaction spend2 = request2.tx;
    assertEquals(3, spend2.getOutputs().size());
    assertEquals(valueToSend.subtract(fee2.divide(2)), spend2.getOutput(0).getValue());
    assertEquals(valueToSend.subtract(fee2.divide(2)), spend2.getOutput(1).getValue());
    assertEquals(COIN.subtract(valueToSend.multiply(2)), spend2.getOutput(2).getValue());
    assertEquals(1, spend2.getInputs().size());
    assertEquals(COIN, spend2.getInput(0).getValue());

    // Fee is split between the 3 outputs. Division has a remainder which is taken from
    // the first output.
    SendRequest request3 = SendRequest.to(OTHER_ADDRESS, valueToSend);
    request3.tx.addOutput(valueToSend, OTHER_ADDRESS);
    request3.tx.addOutput(valueToSend, OTHER_ADDRESS);
    request3.feePerKb = Transaction.DEFAULT_TX_FEE;
    request3.ensureMinRequiredFee = true;
    request3.recipientsPayFees = true;
    request3.shuffleOutputs = false;
    wallet.completeTx(request3);
    // Hardcoded tx length because actual length may vary depending on actual signature length
    Coin fee3 = request3.feePerKb.multiply(295).divide(1000);
    assertEquals(fee3, request3.tx.getFee());
    Transaction spend3 = request3.tx;
    assertEquals(4, spend3.getOutputs().size());
    // 1st output pays the fee division remainder
    assertEquals(valueToSend.subtract(fee3.divideAndRemainder(3)[0]).subtract(fee3.divideAndRemainder(3)[1]),
            spend3.getOutput(0).getValue());
    assertEquals(valueToSend.subtract(fee3.divide(3)), spend3.getOutput(1).getValue());
    assertEquals(valueToSend.subtract(fee3.divide(3)), spend3.getOutput(2).getValue());
    assertEquals(COIN.subtract(valueToSend.multiply(3)), spend3.getOutput(3).getValue());
    assertEquals(1, spend3.getInputs().size());
    assertEquals(COIN, spend3.getInput(0).getValue());

    // Output when subtracted fee is dust
    // Hardcoded tx length because actual length may vary depending on actual signature length
    Coin fee4 = Transaction.DEFAULT_TX_FEE.multiply(227).divide(1000);
    Coin dustThreshold = new TransactionOutput(null, Coin.COIN, OTHER_ADDRESS).getMinNonDustValue();
    valueToSend = fee4.add(dustThreshold).subtract(SATOSHI);
    SendRequest request4 = SendRequest.to(OTHER_ADDRESS, valueToSend);
    request4.feePerKb = Transaction.DEFAULT_TX_FEE;
    request4.ensureMinRequiredFee = true;
    request4.recipientsPayFees = true;
    request4.shuffleOutputs = false;
    try {
        wallet.completeTx(request4);
        fail("Expected CouldNotAdjustDownwards exception");
    } catch (Wallet.CouldNotAdjustDownwards e) {
        // expected: subtracting the fee would leave a dust output
    }

    // Change is dust, so it is incremented to min non dust value. First output value is
    // reduced to compensate.
    // Hardcoded tx length because actual length may vary depending on actual signature length
    Coin fee5 = Transaction.DEFAULT_TX_FEE.multiply(261).divide(1000);
    valueToSend = COIN.divide(2).subtract(Coin.MICROCOIN);
    SendRequest request5 = SendRequest.to(OTHER_ADDRESS, valueToSend);
    request5.tx.addOutput(valueToSend, OTHER_ADDRESS);
    request5.feePerKb = Transaction.DEFAULT_TX_FEE;
    request5.ensureMinRequiredFee = true;
    request5.recipientsPayFees = true;
    request5.shuffleOutputs = false;
    wallet.completeTx(request5);
    assertEquals(fee5, request5.tx.getFee());
    Transaction spend5 = request5.tx;
    assertEquals(3, spend5.getOutputs().size());
    Coin valueSubtractedFromFirstOutput = dustThreshold
            .subtract(COIN.subtract(valueToSend.multiply(2)));
    assertEquals(valueToSend.subtract(fee5.divide(2)).subtract(valueSubtractedFromFirstOutput),
            spend5.getOutput(0).getValue());
    assertEquals(valueToSend.subtract(fee5.divide(2)), spend5.getOutput(1).getValue());
    assertEquals(dustThreshold, spend5.getOutput(2).getValue());
    assertEquals(1, spend5.getInputs().size());
    assertEquals(COIN, spend5.getInput(0).getValue());

    // Change is dust, so it is incremented to min non dust value. First output value is
    // about to be reduced to compensate, but after subtracting some satoshis, first
    // output is dust.
    // Hardcoded tx length because actual length may vary depending on actual signature length
    Coin fee6 = Transaction.DEFAULT_TX_FEE.multiply(261).divide(1000);
    Coin valueToSend1 = fee6.divide(2).add(dustThreshold).add(Coin.MICROCOIN);
    Coin valueToSend2 = COIN.subtract(valueToSend1).subtract(Coin.MICROCOIN.multiply(2));
    SendRequest request6 = SendRequest.to(OTHER_ADDRESS, valueToSend1);
    request6.tx.addOutput(valueToSend2, OTHER_ADDRESS);
    request6.feePerKb = Transaction.DEFAULT_TX_FEE;
    request6.ensureMinRequiredFee = true;
    request6.recipientsPayFees = true;
    request6.shuffleOutputs = false;
    try {
        wallet.completeTx(request6);
        fail("Expected CouldNotAdjustDownwards exception");
    } catch (Wallet.CouldNotAdjustDownwards e) {
        // expected: reducing the first output would make it dust as well
    }
}
/**
 * Returns whether any member in this view has the given address.
 *
 * @param address the address to look for
 * @return true if some member's address equals {@code address}
 */
public boolean containsAddress(Address address) {
    // Linear scan; membership lists are expected to be small.
    for (MemberInfo memberInfo : members) {
        if (memberInfo.getAddress().equals(address)) {
            return true;
        }
    }
    return false;
}
// Every member used to build the view must be reported as contained by address.
@Test
public void containsAddress() {
    MemberImpl[] members = MemberMapTest.newMembers(3);
    MembersView view = MembersView.createNew(1, Arrays.asList(members));
    for (MemberImpl member : members) {
        assertTrue(view.containsAddress(member.getAddress()));
    }
}
/**
 * Computes the effective concurrency limit for this filter.
 *
 * @return the custom per-filter limit when one has been configured,
 *         otherwise the shared default limit
 */
protected int calculateConcurency() {
    final int custom = filterConcurrencyCustom.get();
    // A value equal to the sentinel default means "no custom limit configured".
    if (custom != DEFAULT_FILTER_CONCURRENCY_LIMIT) {
        return custom;
    }
    return filterConcurrencyDefault.get();
}
// With no custom limit configured, calculateConcurency() must return the default (4000).
@Test
void validateDefaultConcurrencyLimit() {
    // Single-element array works around the effectively-final requirement so the
    // inner class can report the value computed inside applyAsync.
    final int[] limit = {0};
    class ConcInboundFilter extends BaseFilter {
        @Override
        public Observable applyAsync(ZuulMessage input) {
            limit[0] = calculateConcurency();
            return Observable.just("Done");
        }

        @Override
        public FilterType filterType() {
            return FilterType.INBOUND;
        }

        @Override
        public boolean shouldFilter(ZuulMessage msg) {
            return true;
        }
    }
    new ConcInboundFilter().applyAsync(new ZuulMessageImpl(new SessionContext(), new Headers()));
    Truth.assertThat(limit[0]).isEqualTo(4000);
}
/** Returns the singleton {@code MetadataCoder} instance. */
public static MetadataCoder of() {
    return INSTANCE;
}
// The coder must survive Java serialization (required for pipeline distribution).
@Test
public void testCoderSerializable() {
    CoderProperties.coderSerializable(MetadataCoder.of());
}
/**
 * Returns a matcher that matches requests whose method is exactly {@code method}
 * (case-sensitive).
 *
 * @param method the RPC method name to match; must be non-null and non-empty
 * @throws NullPointerException if {@code method} is null or empty
 */
public static <Req extends RpcRequest> Matcher<Req> methodEquals(String method) {
    if (method == null) {
        throw new NullPointerException("method == null");
    }
    if (method.isEmpty()) {
        throw new NullPointerException("method is empty");
    }
    return new RpcMethodEquals<Req>(method);
}
// Matching is case-sensitive: "Check" must not match "check".
@Test
void methodEquals_unmatched_mixedCase() {
    when(request.method()).thenReturn("Check");
    assertThat(methodEquals("check").matches(request)).isFalse();
}
/**
 * FEEL append(list, item...) entry point: delegates to the generic overload,
 * widening the list parameter to Object so the shared validation applies.
 */
public FEELFnResult<List<Object>> invoke( @ParameterName( "list" ) List list, @ParameterName( "item" ) Object[] items ) {
    return invoke((Object) list, items);
}
// A null list or null items array must produce an InvalidParametersEvent, not a result.
@Test
void invokeInvalidParams() {
    FunctionTestUtil.assertResultError(appendFunction.invoke((List) null, null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(appendFunction.invoke((List) null, new Object[]{}), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(appendFunction.invoke(Collections.emptyList(), null), InvalidParametersEvent.class);
}
@Override public UpdateResponse delete(Long key) { final boolean isRemoved = (_db.getData().remove(key) != null); // Remove this photo from all albums to maintain referential integrity. AlbumEntryResource.purge(_entryDb, null, key); return new UpdateResponse(isRemoved ? HttpStatus.S_204_NO_CONTENT : HttpStatus.S_404_NOT_FOUND); }
// Deleting an existing photo must answer 204 No Content.
@Test
public void testResourceDelete() {
    final Long id = createPhoto();
    // validate response status code
    final UpdateResponse uResp = _res.delete(id);
    Assert.assertEquals(uResp.getStatus(), HttpStatus.S_204_NO_CONTENT);
}
/**
 * Returns the field-value getters for the given bean type/schema pair,
 * generating them on first request and serving later calls from the cache.
 *
 * <p>Getters are produced in the order reported by the supplied
 * {@code fieldValueTypeSupplier}.
 */
public static List<FieldValueGetter> getGetters(
    TypeDescriptor<?> typeDescriptor,
    Schema schema,
    FieldValueTypeSupplier fieldValueTypeSupplier,
    TypeConversionsFactory typeConversionsFactory) {
  // Cache key combines type and schema so the same class with a different
  // schema gets its own getter list.
  return CACHED_GETTERS.computeIfAbsent(
      TypeDescriptorWithSchema.create(typeDescriptor, schema),
      c -> {
        List<FieldValueTypeInformation> types = fieldValueTypeSupplier.get(typeDescriptor, schema);
        return types.stream()
            .map(t -> createGetter(t, typeConversionsFactory))
            .collect(Collectors.toList());
      });
}
// Generated getters must return the boxed values in schema-field order.
@Test
public void testGeneratedSimpleBoxedGetters() {
    BeanWithBoxedFields bean = new BeanWithBoxedFields();
    bean.setaByte((byte) 41);
    bean.setaShort((short) 42);
    bean.setAnInt(43);
    bean.setaLong(44L);
    bean.setaBoolean(true);
    List<FieldValueGetter> getters =
        JavaBeanUtils.getGetters(
            new TypeDescriptor<BeanWithBoxedFields>() {},
            BEAN_WITH_BOXED_FIELDS_SCHEMA,
            new JavaBeanSchema.GetterTypeSupplier(),
            new DefaultTypeConversionsFactory());
    assertEquals((byte) 41, getters.get(0).get(bean));
    assertEquals((short) 42, getters.get(1).get(bean));
    assertEquals((int) 43, getters.get(2).get(bean));
    assertEquals((long) 44, getters.get(3).get(bean));
    assertTrue((Boolean) getters.get(4).get(bean));
}
/**
 * Returns the {@code DoFnSignature} for the given {@code DoFn} subclass,
 * parsing it on first request and caching the result for later callers.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
// A @StateId on a non-StateSpec field must be rejected with a message that
// mentions StateId/StateSpec but not timers.
@Test
public void testStateIdWithWrongType() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("StateId");
    thrown.expectMessage("StateSpec");
    thrown.expectMessage(not(mentionsTimers()));
    DoFnSignatures.getSignature(
        new DoFn<String, String>() {
            @StateId("foo")
            private final String bizzle = "bazzle";

            @ProcessElement
            public void foo(ProcessContext context) {}
        }.getClass());
}
/**
 * Captures the enumerator's checkpoint state: the last planned position, the
 * splits currently tracked by the assigner, and the enumeration history.
 */
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
    return new IcebergEnumeratorState(
        enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
}
// A single transient planning failure (planner configured to fail once) must not
// lose splits: the retry discovers them on the next periodic trigger.
@Test
public void testTransientPlanningErrorsWithSuccessfulRetry() throws Exception {
    TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
        new TestingSplitEnumeratorContext<>(4);
    ScanContext scanContext =
        ScanContext.builder()
            .streaming(true)
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
            .maxPlanningSnapshotCount(1)
            .maxAllowedPlanningFailures(2)
            .build();
    // Planner fails the first planning attempt, then succeeds.
    ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 1);
    ContinuousIcebergEnumerator enumerator =
        createEnumerator(enumeratorContext, scanContext, splitPlanner);

    // Make one split available and trigger the periodic discovery
    List<IcebergSourceSplit> splits =
        SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
    splitPlanner.addSplits(splits);

    // Trigger a planning and check that no splits returned due to the planning error
    enumeratorContext.triggerAllActions();
    assertThat(enumerator.snapshotState(2).pendingSplits()).isEmpty();

    // Second scan planning should succeed and discover the expected splits
    enumeratorContext.triggerAllActions();
    Collection<IcebergSourceSplitState> pendingSplits = enumerator.snapshotState(3).pendingSplits();
    assertThat(pendingSplits).hasSize(1);
    IcebergSourceSplitState pendingSplit = pendingSplits.iterator().next();
    assertThat(pendingSplit.split().splitId()).isEqualTo(splits.get(0).splitId());
    assertThat(pendingSplit.status()).isEqualTo(IcebergSourceSplitStatus.UNASSIGNED);
}
/**
 * Stores {@code value} under {@code key} only if the key is not yet present.
 *
 * @return the null-sentinel when the value was stored; otherwise the value
 *         already associated with the key
 */
@Override
public long putIfAbsent(long key, long value) {
    assert value != nullValue : "putIfAbsent() called with null-sentinel value " + nullValue;
    final SlotAssignmentResult slot = hsa.ensure(key);
    if (!slot.isNew()) {
        // Key already mapped; leave the existing value in place and return it.
        return mem.getLong(slot.address());
    }
    mem.putLong(slot.address(), value);
    return nullValue;
}
// Inserting into an absent key must return the missing-value sentinel and
// leave the pair stored.
@Test
public void testPutIfAbsent_success() {
    long key = newKey();
    long value = newValue();
    assertEqualsKV(MISSING_VALUE, map.putIfAbsent(key, value), key, value);
}
public synchronized boolean ensureEraseLater(long id, long currentTimeMs) { // 1. not in idToRecycleTime, maybe already erased, sorry it's too late! if (!idToRecycleTime.containsKey(id)) { return false; } // 2. will expire after quite a long time, don't worry long latency = currentTimeMs - idToRecycleTime.get(id); if (latency < (Config.catalog_trash_expire_second - LATE_RECYCLE_INTERVAL_SECONDS) * 1000L) { return true; } // 3. already expired, sorry. if (latency > Config.catalog_trash_expire_second * 1000L) { return false; } enableEraseLater.add(id); return true; }
// Walks a recycled database through the three ensureEraseLater outcomes:
// far from expiry (no flag), near expiry (flag set), past expiry (false, flag kept).
@Test
public void testEnsureEraseLater() {
    Config.catalog_trash_expire_second = 600; // set expire in 10 minutes
    CatalogRecycleBin recycleBin = new CatalogRecycleBin();
    Database db = new Database(111, "uno");
    recycleBin.recycleDatabase(db, new HashSet<>());

    // no need to set enable erase later if there are a lot of time left
    long now = System.currentTimeMillis();
    Assert.assertTrue(recycleBin.ensureEraseLater(db.getId(), now));
    Assert.assertFalse(recycleBin.enableEraseLater.contains(db.getId()));

    // no need to set enable erase later if already expire
    long moreThanTenMinutesLater = now + 620 * 1000L;
    Assert.assertFalse(recycleBin.ensureEraseLater(db.getId(), moreThanTenMinutesLater));
    Assert.assertFalse(recycleBin.enableEraseLater.contains(db.getId()));

    // now we should set enable erase later because we are about to expire
    long moreThanNineMinutesLater = now + 550 * 1000L;
    Assert.assertTrue(recycleBin.ensureEraseLater(db.getId(), moreThanNineMinutesLater));
    Assert.assertTrue(recycleBin.enableEraseLater.contains(db.getId()));

    // if already expired, we should return false but won't erase the flag
    Assert.assertFalse(recycleBin.ensureEraseLater(db.getId(), moreThanTenMinutesLater));
    Assert.assertTrue(recycleBin.enableEraseLater.contains(db.getId()));
}
/**
 * Sends this mail.
 *
 * @return result of {@code doSend()} — presumably the message id; confirm against doSend's contract
 * @throws MailException wrapping any underlying MessagingException; when the
 *         failure is a SendFailedException, the invalid addresses are listed
 *         in the exception message
 */
public String send() throws MailException {
    try {
        return doSend();
    } catch (MessagingException e) {
        if (e instanceof SendFailedException) {
            // When addresses are invalid, include the detailed list of
            // invalid addresses in the error message.
            final Address[] invalidAddresses = ((SendFailedException) e).getInvalidAddresses();
            final String msg = StrUtil.format("Invalid Addresses: {}", ArrayUtil.toString(invalidAddresses));
            throw new MailException(msg, e);
        }
        throw new MailException(e);
    }
}
// Manual test (disabled): sending a mail whose attachment file name is longer
// than 60 characters.
@Test
@Disabled
public void sendWithLongNameFileTest() {
    // Attachment name length > 60 characters
    MailUtil.send("hutool@foxmail.com", "测试", "<h1>邮件来自Hutool测试</h1>", true, FileUtil.file("d:/6-LongLong一阶段平台建设周报2018.3.12-3.16.xlsx"));
}
/**
 * Logical AND over all given values.
 *
 * @param array values to combine; must be non-empty
 * @return true only when every element is true
 * @throws IllegalArgumentException if {@code array} is null or empty
 */
public static boolean and(boolean... array) {
    if (ArrayUtil.isEmpty(array)) {
        throw new IllegalArgumentException("The Array must not be empty !");
    }
    for (final boolean b : array) {
        // Short-circuit on the first false.
        if (!b) {
            return false;
        }
    }
    return true;
}
// Both the primitive and the wrapper variants must report false when any input is false.
@Test
public void andTest(){
    assertFalse(BooleanUtil.and(true,false));
    assertFalse(BooleanUtil.andOfWrap(true,false));
}
/**
 * Probes whether bash can be executed on this host by running a trivial
 * {@code bash -c "echo 1000"} command.
 *
 * @return false on Windows, when execution fails with an IOException, or when
 *         the security manager forbids it; true otherwise
 * @throws InterruptedIOException if the probe is interrupted, so callers can
 *         react to the interruption instead of treating it as "unsupported"
 */
public static boolean checkIsBashSupported() throws InterruptedIOException {
    if (Shell.WINDOWS) {
        return false;
    }
    boolean supported = true;
    try {
        String[] args = {"bash", "-c", "echo 1000"};
        new ShellCommandExecutor(args).execute();
    } catch (InterruptedIOException iioe) {
        // Interruption is not evidence about bash support; propagate it.
        LOG.warn("Interrupted, unable to determine if bash is supported", iioe);
        throw iioe;
    } catch (IOException ioe) {
        LOG.warn("Bash is not supported by the OS", ioe);
        supported = false;
    } catch (SecurityException se) {
        LOG.info("Bash execution is not allowed by the JVM " + "security manager.Considering it not supported.");
        supported = false;
    }
    return supported;
}
// Skips (rather than fails) on hosts where bash is unavailable.
@Test
public void testIsBashSupported() throws InterruptedIOException {
    assumeTrue("Bash is not supported", Shell.checkIsBashSupported());
}
/** Exposes the query-level memory context for tests. */
@VisibleForTesting
MemoryTrackingContext getQueryMemoryContext() {
    return queryMemoryContext;
}
// system(15) + user(6) exceeds the 10B broadcast limit; the allocation must be
// rejected and the pool reservation rolled back.
// NOTE(review): if no exception is thrown the test silently passes — consider
// adding fail() after the setBytes calls; confirm the limit is always enforced.
@Test
public void testChecksTotalMemoryOnUserMemoryAllocationWithBroadcastEnable() {
    MemoryPool generalPool = new MemoryPool(GENERAL_POOL, new DataSize(10, BYTE));
    try (LocalQueryRunner localQueryRunner = new LocalQueryRunner(TEST_SESSION)) {
        QueryContext queryContext = new QueryContext(
                new QueryId("query"),
                new DataSize(10, BYTE), // user memory limit
                new DataSize(20, BYTE), // total memory limit
                new DataSize(10, BYTE),
                new DataSize(1, GIGABYTE),
                generalPool,
                new TestingGcMonitor(),
                localQueryRunner.getExecutor(),
                localQueryRunner.getScheduler(),
                new DataSize(0, BYTE),
                new SpillSpaceTracker(new DataSize(0, BYTE)),
                listJsonCodec(TaskMemoryReservationSummary.class));
        queryContext.getQueryMemoryContext().initializeLocalMemoryContexts("test");
        LocalMemoryContext systemMemoryContext = queryContext.getQueryMemoryContext().localSystemMemoryContext();
        LocalMemoryContext userMemoryContext = queryContext.getQueryMemoryContext().localUserMemoryContext();
        try {
            systemMemoryContext.setBytes(15, true);
            userMemoryContext.setBytes(6);
        } catch (ExceededMemoryLimitException e) {
            assertTrue(e.getMessage().contains("Query exceeded per-node broadcast memory limit of 10B"));
            assertEquals(generalPool.getReservedBytes(), 0);
        }
    }
}
/**
 * SQL function ST_ExteriorRing: the exterior ring of a POLYGON as a
 * LINESTRING, or SQL NULL for an empty polygon. Non-polygon inputs are
 * rejected by {@code validateType}.
 */
@SqlNullable
@Description("Returns a line string representing the exterior ring of the POLYGON")
@ScalarFunction("ST_ExteriorRing")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stExteriorRing(@SqlType(GEOMETRY_TYPE_NAME) Slice input) {
    final Geometry geometry = deserialize(input);
    validateType("ST_ExteriorRing", geometry, EnumSet.of(POLYGON));
    if (geometry.isEmpty()) {
        return null;
    }
    final org.locationtech.jts.geom.Polygon polygon = (org.locationtech.jts.geom.Polygon) geometry;
    return serialize(polygon.getExteriorRing());
}
// Empty polygon -> NULL; simple and holed polygons -> exterior ring only;
// non-polygon inputs -> type-validation error.
@Test
public void testSTExteriorRing() {
    assertFunction("ST_AsText(ST_ExteriorRing(ST_GeometryFromText('POLYGON EMPTY')))", VARCHAR, null);
    assertFunction("ST_AsText(ST_ExteriorRing(ST_GeometryFromText('POLYGON ((1 1, 1 4, 4 1, 1 1))')))", VARCHAR, "LINESTRING (1 1, 1 4, 4 1, 1 1)");
    assertFunction("ST_AsText(ST_ExteriorRing(ST_GeometryFromText('POLYGON ((0 0, 0 5, 5 5, 5 0, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1))')))", VARCHAR, "LINESTRING (0 0, 0 5, 5 5, 5 0, 0 0)");
    assertInvalidFunction("ST_AsText(ST_ExteriorRing(ST_GeometryFromText('LINESTRING (1 1, 2 2, 1 3)')))", "ST_ExteriorRing only applies to POLYGON. Input type is: LINE_STRING");
    assertInvalidFunction("ST_AsText(ST_ExteriorRing(ST_GeometryFromText('MULTIPOLYGON (((1 1, 2 2, 1 3, 1 1)), ((4 4, 5 5, 4 6, 4 4)))')))", "ST_ExteriorRing only applies to POLYGON. Input type is: MULTI_POLYGON");
}
/**
 * Always throws: record headers are not available from within a state store.
 *
 * @throws UnsupportedOperationException on every call
 */
@Override
public Headers headers() {
    throw new UnsupportedOperationException("StateStores can't access headers.");
}
// Accessing headers from the state-store context must be rejected.
@Test
public void shouldThrowOnHeaders() {
    assertThrows(UnsupportedOperationException.class, () -> context.headers());
}
/**
 * Verifies a MySQL clear-text password authentication response.
 *
 * <p>The last byte of the client's response is dropped before the comparison
 * — presumably a NUL terminator appended by the client; confirm against the
 * mysql_clear_password protocol.
 *
 * @param user     user whose stored password is checked
 * @param authInfo element 0 is the raw auth response bytes
 * @return true when the user has no password configured or the passwords match
 */
@Override
public boolean authenticate(final ShardingSphereUser user, final Object[] authInfo) {
    final byte[] authResponse = (byte[]) authInfo[0];
    final int passwordLength = authResponse.length - 1;
    final byte[] clientPassword = new byte[passwordLength];
    System.arraycopy(authResponse, 0, clientPassword, 0, passwordLength);
    final String expectedPassword = user.getPassword();
    return Strings.isNullOrEmpty(expectedPassword) || expectedPassword.equals(new String(clientPassword));
}
// The auth response is the password plus one trailing byte, which the
// authenticator strips before comparing against the stored password.
@Test
void assertAuthenticate() {
    ShardingSphereUser user = new ShardingSphereUser("foo", "password", "%");
    byte[] password = "password".getBytes();
    byte[] authInfo = new byte[password.length + 1];
    System.arraycopy(password, 0, authInfo, 0, password.length);
    assertTrue(new MySQLClearPasswordAuthenticator().authenticate(user, new Object[]{authInfo}));
}
/**
 * Builds the HTTP handler backing the readiness probe.
 *
 * <p>Responses: 204 when the broker-state metric reports a running state,
 * 503 when it reports a non-running state, 404 when the metric is absent.
 */
Handler getReadinessHandler() {
    return new AbstractHandler() {
        @Override
        public void handle(String s, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException {
            response.setContentType("application/json");
            response.setCharacterEncoding("UTF-8");
            baseRequest.setHandled(true);
            if (brokerState != null) {
                byte observedState = (byte) brokerState.value();
                // Ready only when the state is at or above the running
                // threshold and is not the "unknown" sentinel.
                boolean stateIsRunning = BROKER_RUNNING_STATE <= observedState && BROKER_UNKNOWN_STATE != observedState;
                if (stateIsRunning) {
                    LOGGER.trace("Broker is in running according to {}. The current state is {}", brokerStateName, observedState);
                    response.setStatus(HttpServletResponse.SC_NO_CONTENT);
                } else {
                    LOGGER.trace("Broker is not running according to {}. The current state is {}", brokerStateName, observedState);
                    response.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
                    response.getWriter().print("Readiness failed: brokerState is " + observedState);
                }
            } else {
                LOGGER.warn("Broker state metric not found");
                response.setStatus(HttpServletResponse.SC_NOT_FOUND);
                response.getWriter().print("Broker state metric not found");
            }
        }
    };
}
// A broker-state gauge reporting a running value must make the readiness
// endpoint answer 204 No Content.
@Test
public void testReadinessSuccess() throws Exception {
    @SuppressWarnings({ "rawtypes" })
    final Gauge brokerState = mock(Gauge.class);
    when(brokerState.value()).thenReturn((byte) 3);
    KafkaAgent agent = new KafkaAgent(brokerState, null, null, null);
    context.setHandler(agent.getReadinessHandler());
    server.setHandler(context);
    server.start();
    HttpResponse<String> response = HttpClient.newBuilder()
            .build()
            .send(req, HttpResponse.BodyHandlers.ofString());
    assertThat(HttpServletResponse.SC_NO_CONTENT, is(response.statusCode()));
}
/** Executes the Redis GET command for {@code key}, returning the raw bytes. */
@Override
public byte[] get(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
}
// GEOADD three points, then GEORADIUS around the first point: only "a" lies
// inside the 80 km circle.
@Test
public void testGeo() {
    RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();
    redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
    redisTemplate.afterPropertiesSet();

    String key = "test_geo_key";
    Point point = new Point(116.401001, 40.119499);
    redisTemplate.opsForGeo().add(key, point, "a");

    point = new Point(111.545998, 36.133499);
    redisTemplate.opsForGeo().add(key, point, "b");

    point = new Point(111.483002, 36.030998);
    redisTemplate.opsForGeo().add(key, point, "c");

    Circle within = new Circle(116.401001, 40.119499, 80000);
    RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates();
    GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args);
    assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a");
}
/**
 * Case-insensitive string equality. Returns false when either argument is
 * null (including both null). Lower-casing goes through
 * {@code lowerCaseInternal}, which the tests expect to be locale-independent.
 */
@SuppressWarnings("java:S4973")
public static boolean equalsIgnoreCase(String str1, String str2) {
    if (str1 == null || str2 == null) {
        return false;
    }
    // Identity shortcut avoids the lower-casing work for the same instance.
    if (str1 == str2) {
        return true;
    }
    return lowerCaseInternal(str1).equals(lowerCaseInternal(str2));
}
// Null arguments always compare false; otherwise comparison ignores case and
// must be stable under the Turkish locale (dotless-i does not match ASCII i).
@Test
void testEqualsIgnoreCase() {
    assertFalse(StringUtil.equalsIgnoreCase(null, null));
    assertFalse(StringUtil.equalsIgnoreCase(null, "a"));
    assertFalse(StringUtil.equalsIgnoreCase("a", null));

    assertTrue(StringUtil.equalsIgnoreCase("TEST", "test"));
    assertTrue(StringUtil.equalsIgnoreCase("test", "TEST"));
    assertFalse(StringUtil.equalsIgnoreCase("test", "TEST2"));

    // Under the Turkish locale "EXIT".toLowerCase() would be "exıt"; the
    // comparison is expected to stay locale-independent.
    Locale defaultLocale = Locale.getDefault();
    Locale.setDefault(new Locale("tr"));
    try {
        assertTrue(StringUtil.equalsIgnoreCase("EXIT", "exit"));
        assertFalse(StringUtil.equalsIgnoreCase("exıt", "EXIT"));
    } finally {
        Locale.setDefault(defaultLocale);
    }
}
/**
 * Builds the final parameter map for a step by merging parameter sources in
 * ascending precedence order: global step defaults, template-schema injected
 * params, step-type defaults, injected workflow/step info, "undefined" restart
 * params, the step definition, user step run params, user restart params, and
 * finally system-injected restart params (which bypass merge because their
 * mode/source are already fixed).
 *
 * <p>The merge order is significant — later merges override earlier ones.
 *
 * @param workflowSummary summary of the enclosing workflow run
 * @param stepDefinition  the step's static definition
 * @param stepRuntime     runtime used to inject template-schema params
 * @param runtimeSummary  runtime state of this step attempt
 * @return merged, cleaned parameters keyed by name
 */
public Map<String, Parameter> generateMergedStepParams(
    WorkflowSummary workflowSummary,
    Step stepDefinition,
    StepRuntime stepRuntime,
    StepRuntimeSummary runtimeSummary) {
  Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();

  // Start with default step level params if present
  Map<String, ParamDefinition> globalDefault = defaultParamManager.getDefaultStepParams();
  if (globalDefault != null) {
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        globalDefault,
        ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_DEFAULT));
  }

  // Merge in injected params returned by step if present (template schema)
  Map<String, ParamDefinition> injectedParams =
      stepRuntime.injectRuntimeParams(workflowSummary, stepDefinition);
  maybeOverrideParamType(allParamDefs);
  if (injectedParams != null) {
    maybeOverrideParamType(injectedParams);
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        injectedParams,
        ParamsMergeHelper.MergeContext.stepCreate(ParamSource.TEMPLATE_SCHEMA));
  }

  // Merge in params applicable to step type
  Optional<Map<String, ParamDefinition>> defaultStepTypeParams =
      defaultParamManager.getDefaultParamsForType(stepDefinition.getType());
  if (defaultStepTypeParams.isPresent()) {
    LOG.debug("Merging step level default for {}", stepDefinition.getType());
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        defaultStepTypeParams.get(),
        ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_DEFAULT));
  }

  // Merge in workflow and step info
  ParamsMergeHelper.mergeParams(
      allParamDefs,
      injectWorkflowAndStepInfoParams(workflowSummary, runtimeSummary),
      ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_INJECTED));

  // merge step run param and user provided restart step run params
  // first to get undefined params from both run param and restart params
  Map<String, ParamDefinition> undefinedRestartParams = new LinkedHashMap<>();
  Optional<Map<String, ParamDefinition>> stepRestartParams =
      getUserStepRestartParam(workflowSummary, runtimeSummary);
  stepRestartParams.ifPresent(undefinedRestartParams::putAll);
  Optional<Map<String, ParamDefinition>> stepRunParams =
      getStepRunParams(workflowSummary, runtimeSummary);

  // During a restart, reserved system-injected CONSTANT run params are pulled
  // aside: their source is rewritten to RESTART and they are applied last,
  // after all merges, with mode/source untouched by the merge machinery.
  Map<String, ParamDefinition> systemInjectedRestartRunParams = new LinkedHashMap<>();
  stepRunParams.ifPresent(
      params -> {
        params.forEach(
            (key, val) -> {
              if (runtimeSummary.getRestartConfig() != null
                  && Constants.RESERVED_PARAM_NAMES.contains(key)
                  && val.getMode() == ParamMode.CONSTANT
                  && val.getSource() == ParamSource.SYSTEM_INJECTED) {
                ((AbstractParamDefinition) val)
                    .getMeta()
                    .put(Constants.METADATA_SOURCE_KEY, ParamSource.RESTART.name());
                systemInjectedRestartRunParams.put(key, val);
              }
            });
        systemInjectedRestartRunParams.keySet().forEach(params::remove);
      });
  stepRunParams.ifPresent(undefinedRestartParams::putAll);

  // Params that also appear in the step definition are not "undefined".
  Optional.ofNullable(stepDefinition.getParams())
      .ifPresent(
          stepDefParams ->
              stepDefParams.keySet().stream()
                  .filter(undefinedRestartParams::containsKey)
                  .forEach(undefinedRestartParams::remove));

  // Then merge undefined restart params
  if (!undefinedRestartParams.isEmpty()) {
    mergeUserProvidedStepParams(allParamDefs, undefinedRestartParams, workflowSummary);
  }

  // Final merge from step definition
  if (stepDefinition.getParams() != null) {
    maybeOverrideParamType(stepDefinition.getParams());
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        stepDefinition.getParams(),
        ParamsMergeHelper.MergeContext.stepCreate(ParamSource.DEFINITION));
  }

  // merge step run params
  stepRunParams.ifPresent(
      stepParams -> mergeUserProvidedStepParams(allParamDefs, stepParams, workflowSummary));

  // merge all user provided restart step run params
  stepRestartParams.ifPresent(
      stepParams -> mergeUserProvidedStepParams(allParamDefs, stepParams, workflowSummary));

  // merge all system injected restart step run params with mode and source already set.
  allParamDefs.putAll(systemInjectedRestartRunParams);

  // Cleanup any params that are missing and convert to params
  return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
}
// On restart, attempting to override the reserved subworkflow params
// (MUTABLE_ON_START mode) via restart run params must be rejected.
@Test
public void testRestartSubworkflowStepRunParamMerge() throws IOException {
    DefaultParamManager defaultParamManager = new DefaultParamManager(JsonHelper.objectMapperWithYaml());
    defaultParamManager.init();
    ParamsManager paramsManager = new ParamsManager(defaultParamManager);
    for (String paramName : new String[] {"subworkflow_id", "subworkflow_version"}) {
        Map<String, Map<String, ParamDefinition>> stepRunParams =
            singletonMap(
                "stepid",
                singletonMap(paramName, ParamDefinition.buildParamDefinition(paramName, "1")));
        Map<String, Map<String, ParamDefinition>> stepRestartParams =
            singletonMap(
                "stepid",
                singletonMap(paramName, ParamDefinition.buildParamDefinition(paramName, "2")));
        ManualInitiator manualInitiator = new ManualInitiator();
        workflowSummary.setInitiator(manualInitiator);
        workflowSummary.setStepRunParams(stepRunParams);
        workflowSummary.setRestartConfig(
            RestartConfig.builder()
                .addRestartNode("sample-wf-map-params", 1, "stepid")
                .stepRestartParams(stepRestartParams)
                .build());
        workflowSummary.setRunPolicy(RunPolicy.RESTART_FROM_SPECIFIC);
        Step step = Mockito.mock(Step.class);
        when(step.getType()).thenReturn(StepType.SUBWORKFLOW);
        when(step.getParams()).thenReturn(null);
        AssertHelper.assertThrows(
            "Cannot modify param with MUTABLE_ON_START during restart",
            MaestroValidationException.class,
            "Cannot modify param with mode [MUTABLE_ON_START] for parameter [" + paramName + "]",
            () ->
                paramsManager.generateMergedStepParams(
                    workflowSummary, step, stepRuntime, runtimeSummary));
    }
}
/**
 * Merges sharded DQL query results.
 *
 * <p>A single result with no aggregation rewrite is streamed through as-is.
 * Otherwise the column-label index map from the first result is recorded on
 * the select context (required before {@code build}), the results are merged,
 * and pagination/decoration is applied on top.
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
        return new IteratorStreamMergedResult(queryResults);
    }
    Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
    SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
    // Side effect required by the downstream merge: indexes must be set first.
    selectStatementContext.setIndexes(columnLabelIndexMap);
    MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
    return decorate(queryResults, selectStatementContext, mergedResult);
}
// GROUP BY + ORDER BY with a SQL Server TOP/ROW_NUMBER limit must produce a
// decorator merged result wrapping a group-by memory merged result.
@Test
void assertBuildGroupByMemoryMergedResultWithSQLServerLimit() throws SQLException {
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "SQLServer"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
    SQLServerSelectStatement selectStatement = (SQLServerSelectStatement) buildSelectStatement(new SQLServerSelectStatement());
    ProjectionsSegment projectionsSegment = new ProjectionsSegment(0, 0);
    selectStatement.setProjections(projectionsSegment);
    selectStatement.setGroupBy(new GroupBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setOrderBy(new OrderBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.ASC, NullsOrderType.FIRST))));
    selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralRowNumberValueSegment(0, 0, 1L, true), null));
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(),
            selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createSQLServerDatabase(), mock(ConnectionContext.class));
    assertThat(actual, instanceOf(TopAndRowNumberDecoratorMergedResult.class));
    assertThat(((TopAndRowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(GroupByMemoryMergedResult.class));
}
/**
 * Debug hook: logs the current outbound envelope and replaces it with a fixed
 * large payload (a long run of 'S' characters) used by the tests to verify
 * that the replacement reaches the output stream unchanged.
 */
protected String changeOutboundMessage(String currentEnvelope) {
    logger.debug(currentEnvelope);
    // Fixed replacement payload; must match the expected value in the test.
    return "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS";
}
// The interceptor must replace the outbound payload with the fixed
// large 'S' string produced by changeOutboundMessage, and that hook must be invoked.
@Test
public void changeEnvelopMessageOutbound() throws IOException {
    Message message = new MessageImpl();
    Exchange exchange = new ExchangeImpl();
    exchange.setOutMessage(message);
    message.setExchange(exchange);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    outputStream.write("tesT".getBytes());
    message.setContent(OutputStream.class, outputStream);
    message.setInterceptorChain(mock(InterceptorChain.class));
    DebugChangeMessage debugChangeMessage = spy(new DebugChangeMessage());
    debugChangeMessage.handleMessage(message);
    assertEquals(
        "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS",
        message.getContent(OutputStream.class).toString());
    verify(debugChangeMessage).changeOutboundMessage(anyString());
}
/**
 * Returns whether an end-of-block position has been recorded
 * (i.e. {@code endOfBlockIndex} is non-negative).
 */
public synchronized boolean hasEndOfBlock() {
    return endOfBlockIndex >= 0;
}
// hasEndOfBlock becomes true only after START_OF_BLOCK + payload + END_OF_BLOCK
// are written in order; a stray END_OF_BLOCK before START_OF_BLOCK is ignored,
// and the flag survives trailing bytes until reset().
@Test
public void testHasEndOfBlock() {
    assertFalse(instance.hasEndOfBlock(), "Unexpected initial value");

    instance.write(MllpProtocolConstants.END_OF_BLOCK);
    assertFalse(instance.hasEndOfBlock(), "START_OF_BLOCK before an END_OF_BLOCK");

    instance.reset();
    assertFalse(instance.hasEndOfBlock());

    instance.write(MllpProtocolConstants.START_OF_BLOCK);
    assertFalse(instance.hasEndOfBlock());

    instance.write(TEST_HL7_MESSAGE.getBytes());
    assertFalse(instance.hasEndOfBlock());

    instance.write(MllpProtocolConstants.END_OF_BLOCK);
    assertTrue(instance.hasEndOfBlock());

    instance.write(MllpProtocolConstants.END_OF_DATA);
    assertTrue(instance.hasEndOfBlock());

    instance.reset();
    assertFalse(instance.hasEndOfBlock());

    instance.write(MllpProtocolConstants.START_OF_BLOCK);
    assertFalse(instance.hasEndOfBlock());

    instance.write(TEST_HL7_MESSAGE.getBytes());
    assertFalse(instance.hasEndOfBlock());

    instance.write(MllpProtocolConstants.END_OF_BLOCK);
    assertTrue(instance.hasEndOfBlock());

    instance.write("BLAH".getBytes());
    assertTrue(instance.hasEndOfBlock());
}
/**
 * Stores the caller's registration recovery password.
 *
 * <p>Rejects empty passwords with INVALID_ARGUMENT; an unknown account maps
 * to UNAUTHENTICATED. On success the password is persisted for the account's
 * current phone number.
 */
@Override
public Mono<SetRegistrationRecoveryPasswordResponse> setRegistrationRecoveryPassword(final SetRegistrationRecoveryPasswordRequest request) {
    final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();

    if (request.getRegistrationRecoveryPassword().isEmpty()) {
        throw Status.INVALID_ARGUMENT
            .withDescription("Registration recovery password must not be empty")
            .asRuntimeException();
    }

    // Resolve the caller's account, then persist the recovery password.
    return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))
        .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
        .flatMap(account -> Mono.fromFuture(() -> registrationRecoveryPasswordsManager.storeForCurrentNumber(account.getNumber(), request.getRegistrationRecoveryPassword().toByteArray())))
        .thenReturn(SetRegistrationRecoveryPasswordResponse.newBuilder().build());
}
// A valid non-empty recovery password must be stored for the authenticated
// account's current phone number without error.
@Test
void setRegistrationRecoveryPassword() {
    final String phoneNumber = PhoneNumberUtil.getInstance().format(
        PhoneNumberUtil.getInstance().getExampleNumber("US"),
        PhoneNumberUtil.PhoneNumberFormat.E164);

    final Account account = mock(Account.class);
    when(account.getNumber()).thenReturn(phoneNumber);
    when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI))
        .thenReturn(CompletableFuture.completedFuture(Optional.of(account)));

    final byte[] registrationRecoveryPassword = TestRandomUtil.nextBytes(32);

    assertDoesNotThrow(() -> authenticatedServiceStub().setRegistrationRecoveryPassword(SetRegistrationRecoveryPasswordRequest.newBuilder()
        .setRegistrationRecoveryPassword(ByteString.copyFrom(registrationRecoveryPassword))
        .build()));

    verify(registrationRecoveryPasswordsManager).storeForCurrentNumber(phoneNumber, registrationRecoveryPassword);
}
/**
 * Opens the operator and creates a worker pool whose name embeds this operator's
 * unique ID so threads from different operator instances are distinguishable.
 */
@Override
public void open() throws Exception {
    super.open();
    String operatorId = getRuntimeContext().getOperatorUniqueID();
    this.workerPool = ThreadPools.newWorkerPool("iceberg-worker-pool-" + operatorId, workerPoolSize);
}
// Verifies that checkpoints with no data files still advance the max-committed-checkpoint-id in the
// Iceberg snapshot (one empty snapshot per checkpoint, no flink manifests), so a later failover does
// not replay already-acknowledged checkpoints.
@TestTemplate public void testCommitTxnWithoutDataFiles() throws Exception { long checkpointId = 0; long timestamp = 0; JobID jobId = new JobID(); OperatorID operatorId; try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) { harness.setup(); harness.open(); operatorId = harness.getOperator().getOperatorID(); SimpleDataUtil.assertTableRows(table, Lists.newArrayList(), branch); assertSnapshotSize(0); assertMaxCommittedCheckpointId(jobId, operatorId, -1L); // It's better to advance the max-committed-checkpoint-id in iceberg snapshot, so that the // future flink job // failover won't fail. for (int i = 1; i <= 3; i++) { harness.snapshot(++checkpointId, ++timestamp); assertFlinkManifests(0); harness.notifyOfCompletedCheckpoint(checkpointId); assertFlinkManifests(0); assertSnapshotSize(i); assertMaxCommittedCheckpointId(jobId, operatorId, checkpointId); } } }
/**
 * Returns the MD5 checksum of this dependency's file, computing and caching it on first use.
 * <p>
 * NOTE(review): the lazy initialization is not synchronized — presumably instances are confined
 * to a single thread; confirm before sharing across threads.
 *
 * @return the MD5 checksum as a hex string
 */
public String getMd5sum() {
    if (md5sum == null) {
        md5sum = determineHashes(MD5_HASHING_FUNCTION);
    }
    return md5sum;
}
// Checks that getMd5sum() returns the known lowercase hex MD5 of a fixture jar.
@Test public void testGetMd5sum() { //File file = new File(this.getClass().getClassLoader().getResource("struts2-core-2.1.2.jar").getPath()); File file = BaseTest.getResourceAsFile(this, "struts2-core-2.1.2.jar"); Dependency instance = new Dependency(file); //assertEquals("89CE9E36AA9A9E03F1450936D2F4F8DD0F961F8B", result.getSha1sum()); //String expResult = "C30B57142E1CCBC1EFD5CD15F307358F"; String expResult = "c30b57142e1ccbc1efd5cd15f307358f"; String result = instance.getMd5sum(); assertEquals(expResult, result); }
/**
 * Registers the trigger-manager metrics and keyed state, then opens the table loader.
 * <p>
 * Counters for rate limiting, throttling and "nothing to trigger" live under the default
 * maintenance group; each maintenance task additionally gets its own TRIGGERED counter.
 */
@Override
public void open(Configuration parameters) throws Exception {
    this.rateLimiterTriggeredCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
    this.concurrentRunThrottledCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
    this.nothingToTriggerCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
    // One TRIGGERED counter per maintenance task, grouped by task name.
    this.triggerCounters =
        taskNames.stream()
            .map(
                taskName ->
                    getRuntimeContext()
                        .getMetricGroup()
                        .addGroup(TableMaintenanceMetrics.GROUP_KEY, taskName)
                        .counter(TableMaintenanceMetrics.TRIGGERED))
            .collect(Collectors.toList());

    // Keyed state: next evaluation time, accumulated table changes, and last trigger times.
    this.nextEvaluationTimeState =
        getRuntimeContext()
            .getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
    this.accumulatedChangesState =
        getRuntimeContext()
            .getListState(
                new ListStateDescriptor<>(
                    "triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
    this.lastTriggerTimesState =
        getRuntimeContext()
            .getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));

    tableLoader.open();
}
// Verifies the trigger evaluator fires whenever accumulated position-delete record counts reach the
// threshold (3): expected trigger counts after each event are 0, 1, 2, 2, 3.
@Test void testPosDeleteRecordCount() throws Exception { TriggerManager manager = manager( sql.tableLoader(TABLE_NAME), new TriggerEvaluator.Builder().posDeleteRecordCount(3).build()); try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness = harness(manager)) { testHarness.open(); addEventAndCheckResult( testHarness, TableChange.builder().posDeleteRecordCount(1L).build(), 0); addEventAndCheckResult( testHarness, TableChange.builder().posDeleteRecordCount(2L).build(), 1); addEventAndCheckResult( testHarness, TableChange.builder().posDeleteRecordCount(5L).build(), 2); // No trigger in this case addEventAndCheckResult( testHarness, TableChange.builder().posDeleteRecordCount(1L).build(), 2); addEventAndCheckResult( testHarness, TableChange.builder().posDeleteRecordCount(2L).build(), 3); } }
public Object modifyPrediction(Object prediction) { if (!(prediction instanceof Number)) { // TODO DROOLS-6345 TargetValue currently unimplemented - only direct number operations allowed return prediction; } double predictionDouble = (double) prediction; Number toReturn = applyMin(predictionDouble); toReturn = applyMax((double) toReturn); toReturn = applyRescaleFactor((double)toReturn); toReturn = applyRescaleConstant((double)toReturn); toReturn = applyCastInteger((double)toReturn); // TODO DROOLS-6345 TargetValue currently unimplemented return toReturn; }
// Non-numeric predictions pass through untouched; with no min configured a numeric prediction is
// unchanged; with min=4.34 a lower numeric prediction is clamped up to 4.34.
@Test void modifyPrediction() { Object object = "STRING"; TargetField targetField = new TargetField(Collections.emptyList(), null, "string", null, null, null, null, null); KiePMMLTarget kiePMMLTarget = getBuilder(targetField).build(); assertThat(kiePMMLTarget.modifyPrediction(object)).isEqualTo(object); object = 4.33; assertThat(kiePMMLTarget.modifyPrediction(object)).isEqualTo(object); targetField = new TargetField(Collections.emptyList(), null, "string", null, 4.34, null, null, null); kiePMMLTarget = getBuilder(targetField).build(); object = "STRING"; assertThat(kiePMMLTarget.modifyPrediction(object)).isEqualTo(object); object = 4.33; assertThat(kiePMMLTarget.modifyPrediction(object)).isEqualTo(4.34); }
/**
 * Parses forwarded / non-forwarded / read-field annotations into {@code result}.
 * Convenience overload that delegates to the full variant with
 * {@code skipIncompatibleTypes = false}.
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
// Non-forwarded "pojo1.*" wipes forwarding for all nested pojo fields; a single nested field
// ("pojo1.int2") only clears that flat position while siblings keep identity forwarding.
@Test void testNonForwardedNestedPojo() { String[] nonForwardedFields = {"int1; pojo1.*"}; SingleInputSemanticProperties sp = new SingleInputSemanticProperties(); SemanticPropUtil.getSemanticPropsSingleFromString( sp, null, nonForwardedFields, null, nestedPojoType, nestedPojoType); assertThat(sp.getForwardingTargetFields(0, 0).size()).isZero(); assertThat(sp.getForwardingTargetFields(0, 1).size()).isZero(); assertThat(sp.getForwardingTargetFields(0, 2).size()).isZero(); assertThat(sp.getForwardingTargetFields(0, 3).size()).isZero(); assertThat(sp.getForwardingTargetFields(0, 4).size()).isZero(); assertThat(sp.getForwardingTargetFields(0, 5)).contains(5); nonForwardedFields[0] = "pojo1.int2; string1"; sp = new SingleInputSemanticProperties(); SemanticPropUtil.getSemanticPropsSingleFromString( sp, null, nonForwardedFields, null, nestedPojoType, nestedPojoType); assertThat(sp.getForwardingTargetFields(0, 0)).contains(0); assertThat(sp.getForwardingTargetFields(0, 1)).contains(1); assertThat(sp.getForwardingTargetFields(0, 2)).isEmpty(); assertThat(sp.getForwardingTargetFields(0, 3)).contains(3); assertThat(sp.getForwardingTargetFields(0, 4)).contains(4); assertThat(sp.getForwardingTargetFields(0, 5)).isEmpty(); }
// Fetches session windows for the key from the first underlying store that has a non-empty result,
// closing empty iterators along the way; falls back to an empty iterator when no store matches.
// A store-level InvalidStateStoreException is rewrapped with a migration hint.
// NOTE(review): the rewrap appends "+ ise" to the message instead of chaining it as the cause, so the
// original stack trace is lost — confirm whether the exception type offers a (message, cause) constructor.
@Override public KeyValueIterator<Windowed<K>, V> fetch(final K key) { Objects.requireNonNull(key, "key can't be null"); final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType); for (final ReadOnlySessionStore<K, V> store : stores) { try { final KeyValueIterator<Windowed<K>, V> result = store.fetch(key); if (!result.hasNext()) { result.close(); } else { return result; } } catch (final InvalidStateStoreException ise) { throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" + " and may have been migrated to another instance; " + "please re-discover its location from the state metadata. " + "Original error message: " + ise); } } return KeyValueIterators.emptyIterator(); }
// With two underlying stores registered, fetch() must locate a key held by either store,
// skipping the store whose iterator is empty for that key.
@Test public void shouldFindValueForKeyWhenMultiStores() { final ReadOnlySessionStoreStub<String, Long> secondUnderlying = new ReadOnlySessionStoreStub<>(); stubProviderTwo.addStore(storeName, secondUnderlying); final Windowed<String> keyOne = new Windowed<>("key-one", new SessionWindow(0, 0)); final Windowed<String> keyTwo = new Windowed<>("key-two", new SessionWindow(0, 0)); underlyingSessionStore.put(keyOne, 0L); secondUnderlying.put(keyTwo, 10L); final List<KeyValue<Windowed<String>, Long>> keyOneResults = toList(sessionStore.fetch("key-one")); final List<KeyValue<Windowed<String>, Long>> keyTwoResults = toList(sessionStore.fetch("key-two")); assertEquals(singletonList(KeyValue.pair(keyOne, 0L)), keyOneResults); assertEquals(singletonList(KeyValue.pair(keyTwo, 10L)), keyTwoResults); }
/** Iterates over this container's objects, delegating to the backing collection. */
@Override
public Iterator<COSBase> iterator() {
    return getObjects().iterator();
}
/**
 * Reproduces PDFBOX-5627: after an incremental save that adds text with a newly loaded TTF,
 * the font in the reloaded document must be embedded (i.e. the subset was written).
 * <p>
 * Fix: the {@code FileOutputStream} handed to {@code saveIncremental} was never closed,
 * leaking a file handle; it is now managed with try-with-resources.
 */
@Test
void testSubsetting() throws IOException {
    // Build a minimal single-page document in memory.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (PDDocument document = new PDDocument()) {
        PDPage page = new PDPage(PDRectangle.A4);
        document.addPage(page);
        document.save(baos);
    }
    // Reload it, draw text with a freshly loaded font, and save incrementally.
    try (PDDocument document = Loader.loadPDF(baos.toByteArray())) {
        PDPage page = document.getPage(0);
        PDFont font = PDType0Font.load(document, TestCOSIncrement.class.getResourceAsStream(
                "/org/apache/pdfbox/resources/ttf/LiberationSans-Regular.ttf"));
        try (PDPageContentStream contentStream = new PDPageContentStream(document, page)) {
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(75, 750);
            contentStream.showText("Apache PDFBox");
            contentStream.endText();
        }
        // Mark the path from catalog to page as updated so the incremental save picks it up.
        COSDictionary catalog = document.getDocumentCatalog().getCOSObject();
        catalog.setNeedToBeUpdated(true);
        COSDictionary pages = catalog.getCOSDictionary(COSName.PAGES);
        pages.setNeedToBeUpdated(true);
        page.getCOSObject().setNeedToBeUpdated(true);
        try (FileOutputStream output = new FileOutputStream("target/test-output/PDFBOX-5627.pdf")) {
            document.saveIncremental(output);
        }
    }
    // The font written by the incremental save must be embedded.
    try (PDDocument document = Loader.loadPDF(new File("target/test-output/PDFBOX-5627.pdf"))) {
        PDPage page = document.getPage(0);
        COSName fontName = page.getResources().getFontNames().iterator().next();
        PDFont font = page.getResources().getFont(fontName);
        assertTrue(font.isEmbedded());
    }
}
// Decides whether a SELECT involving single tables must go through SQL federation:
// no single tables -> false; any referenced view -> true; mixed with previously collected data nodes
// outside an inner comma join -> true; otherwise federate only when the tables do NOT all live on the
// same compute node. Note the data nodes are appended AFTER the same-compute-node check, so the check
// runs against nodes accumulated by earlier deciders only.
@Override public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final SingleRule rule, final Collection<DataNode> includedDataNodes) { Collection<QualifiedTable> singleTables = getSingleTables(selectStatementContext, database, rule); if (singleTables.isEmpty()) { return false; } if (containsView(database, singleTables)) { return true; } if (!includedDataNodes.isEmpty() && !isInnerCommaJoin(selectStatementContext.getSqlStatement())) { return true; } boolean result = rule.isAllTablesInSameComputeNode(includedDataNodes, singleTables); includedDataNodes.addAll(getTableDataNodes(rule, singleTables)); return !result; }
// When the two single tables are NOT on the same compute node, decide() must return true and the
// tables' data nodes must be appended to the shared collection.
@Test void assertDecideWhenAllSingleTablesNotInSameComputeNode() { Collection<QualifiedTable> qualifiedTables = Arrays.asList(new QualifiedTable(DefaultDatabase.LOGIC_NAME, "t_order"), new QualifiedTable(DefaultDatabase.LOGIC_NAME, "t_order_item")); SingleRule rule = createSingleRule(qualifiedTables); SelectStatementContext select = createStatementContext(); Collection<DataNode> includedDataNodes = new HashSet<>(); when(rule.isAllTablesInSameComputeNode(includedDataNodes, qualifiedTables)).thenReturn(false); assertTrue(new SingleSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), createDatabase(), rule, includedDataNodes)); assertThat(includedDataNodes.size(), is(2)); }
/** Removes every entry by issuing a removeAll with an always-true predicate. */
@Override
public void removeAll() {
    map.removeAll(Predicates.alwaysTrue());
}
// Two entries put through the raw map must be gone after the adapter's removeAll().
@Test public void testRemoveAll() { map.put(23, "value-23"); map.put(42, "value-42"); adapter.removeAll(); assertEquals(0, map.size()); }
/** Returns this function's registered name. */
@Override
public String getName() {
    return _name;
}
// sub_str(col, 0, 2) must behave like String.substring(0, 2); substr(col, '2', '-1') with string
// literals must behave like substring(2) (−1 meaning "to end"). Both resolve to the scalar wrapper
// registered under the name "substr".
@Test public void testStringSubStrTransformFunction() { ExpressionContext expression = RequestContextUtils.getExpression(String.format("sub_str(%s, 0, 2)", STRING_ALPHANUM_SV_COLUMN)); TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper); assertEquals(transformFunction.getName(), "substr"); String[] expectedValues = new String[NUM_ROWS]; for (int i = 0; i < NUM_ROWS; i++) { expectedValues[i] = _stringAlphaNumericSVValues[i].substring(0, 2); } testTransformFunction(transformFunction, expectedValues); expression = RequestContextUtils.getExpression(String.format("substr(%s, '2', '-1')", STRING_ALPHANUM_SV_COLUMN)); transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper); assertEquals(transformFunction.getName(), "substr"); expectedValues = new String[NUM_ROWS]; for (int i = 0; i < NUM_ROWS; i++) { expectedValues[i] = _stringAlphaNumericSVValues[i].substring(2); } testTransformFunction(transformFunction, expectedValues); }
/**
 * Maps each element of {@code array} through {@code func} into a new array of
 * {@code targetComponentType}.
 *
 * @param array               source elements
 * @param targetComponentType component type of the result array
 * @param func                element transformation
 * @return a new array of the same length holding the mapped elements
 */
public static <T, R> R[] map(T[] array, Class<R> targetComponentType, Function<? super T, ? extends R> func) {
    final R[] mapped = newArray(targetComponentType, array.length);
    int index = 0;
    for (T element : array) {
        mapped[index++] = func.apply(element);
    }
    return mapped;
}
// NOTE(review): despite its name, this test exercises ArrayUtil.zip (key/value pairing into an
// ordered map), not ArrayUtil.map — consider renaming or adding a dedicated map() test.
@Test public void mapTest() { String[] keys = {"a", "b", "c"}; Integer[] values = {1, 2, 3}; Map<String, Integer> map = ArrayUtil.zip(keys, values, true); assertEquals(Objects.requireNonNull(map).toString(), "{a=1, b=2, c=3}"); }
/**
 * For every container cluster that declares HTTP with access control enabled,
 * verifies that the access-control filter is actually present.
 */
@Override
public void validate(Context context) {
    context.model().getContainerClusters().forEach((id, cluster) -> {
        Http http = cluster.getHttp();
        if (http != null && http.getAccessControl().isPresent()) {
            verifyAccessControlFilterPresent(context, http);
        }
    });
}
// The validator must reject a model whose access-control filter chain is empty with the
// "not available in open-source Vespa" message.
@Test void validator_fails_with_empty_access_control_filter_chain() throws IOException, SAXException { DeployState deployState = createDeployState(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState); try { ValidationTester.validate(new AccessControlFilterValidator(), model, deployState); fail(); } catch (IllegalArgumentException e) { assertEquals("The 'access-control' feature is not available in open-source Vespa.", e.getMessage()); } }
// Loads all languages from the loader into the local registry on startup.
@Override public void start() { languages.putAll(languagesLoader.load()); }
// A language configured with neither file suffixes nor patterns (Python here) must produce a DEBUG
// log explaining it cannot be detected.
@Test public void should_log_if_language_has_no_suffixes_or_patterns() { WsTestUtil.mockReader(wsClient, "/api/languages/list", new InputStreamReader(getClass().getResourceAsStream("DefaultLanguageRepositoryTest/languages-ws.json"))); when(properties.getStringArray("sonar.java.file.suffixes")).thenReturn(JAVA_SUFFIXES); when(properties.getStringArray("sonar.xoo.file.patterns")).thenReturn(XOO_PATTERNS); underTest.start(); assertThat(logTester.logs(Level.DEBUG)).contains("Language 'Python' cannot be detected as it has neither suffixes nor patterns."); }
/**
 * Ensures the mining model declares a non-blank target field; throws
 * {@link KiePMMLInternalException} otherwise.
 */
void validateMining(final KiePMMLMiningModel toValidate) {
    final String targetField = toValidate.getTargetField();
    if (targetField == null || StringUtils.isEmpty(targetField.trim())) {
        throw new KiePMMLInternalException(String.format(TARGET_FIELD_REQUIRED_RETRIEVED, targetField));
    }
}
// A mining model built without a target field must be rejected with KiePMMLInternalException.
@Test void validateMiningNoTargetField() { assertThatExceptionOfType(KiePMMLInternalException.class).isThrownBy(() -> { String name = "NAME"; KiePMMLMiningModel kiePMMLMiningModel = KiePMMLMiningModel.builder("FILENAME", name, Collections.emptyList(), MINING_FUNCTION.ASSOCIATION_RULES).build(); evaluator.validateMining(kiePMMLMiningModel); }); }
/** Declares the step-meta types this analyzer handles: only {@link JsonOutputMeta}. */
@Override
public Set<Class<? extends BaseStepMeta>> getSupportedSteps() {
    final Set<Class<? extends BaseStepMeta>> steps = new HashSet<>();
    steps.add( JsonOutputMeta.class );
    return steps;
}
/**
 * The analyzer must support exactly one step type: {@link JsonOutputMeta}.
 * <p>
 * Fix: {@code assertEquals} takes (expected, actual) in that order; the original call had them
 * reversed, which would have produced a misleading failure message.
 */
@Test
public void testGetSupportedSteps() {
    JsonOutputAnalyzer analyzer = new JsonOutputAnalyzer();
    Set<Class<? extends BaseStepMeta>> types = analyzer.getSupportedSteps();
    assertNotNull( types );
    assertEquals( 1, types.size() );
    assertTrue( types.contains( JsonOutputMeta.class ) );
}
// Coerces the right-hand (and occasionally left-hand) typed expression so a binary comparison
// compiles against the left-hand type. Same class or unification expressions pass through untouched;
// incompatible classes throw a CoercedExpressionException. Special cases, in evaluation order:
// int/long vs Double widening of the LEFT side, literal-number narrowing to the left primitive type,
// string coercion, narrowing of number literals, int->long cast, String->Date/LocalDate/LocalDateTime
// parsing (flagged rightAsStaticField), map coercion, and boolean coercion. Character-vs-string
// comparisons additionally coerce the LEFT side to String. The branch ordering is load-bearing —
// kept byte-identical; documented only.
public CoercedExpressionResult coerce() { final Class<?> leftClass = left.getRawClass(); final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass); final Class<?> rightClass = right.getRawClass(); final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass); boolean sameClass = leftClass == rightClass; boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression; if (sameClass || isUnificationExpression) { return new CoercedExpressionResult(left, right); } if (!canCoerce()) { throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass)); } if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) { CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression()); return new CoercedExpressionResult( new TypedExpression(castExpression, double.class, left.getType()), right, false); } final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass ); final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass); boolean rightAsStaticField = false; final Expression rightExpression = right.getExpression(); final TypedExpression coercedRight; if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) { final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass); coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType); coercedRight.setType( leftClass ); } else if (shouldCoerceBToString(left, right)) { coercedRight = coerceToString(right); } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) { coercedRight = castToClass(leftClass); } else if 
(leftClass == long.class && rightClass == int.class) { coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression())); } else if (leftClass == Date.class && rightClass == String.class) { coercedRight = coerceToDate(right); rightAsStaticField = true; } else if (leftClass == LocalDate.class && rightClass == String.class) { coercedRight = coerceToLocalDate(right); rightAsStaticField = true; } else if (leftClass == LocalDateTime.class && rightClass == String.class) { coercedRight = coerceToLocalDateTime(right); rightAsStaticField = true; } else if (shouldCoerceBToMap()) { coercedRight = castToClass(toNonPrimitiveType(leftClass)); } else if (isBoolean(leftClass) && !isBoolean(rightClass)) { coercedRight = coerceBoolean(right); } else { coercedRight = right; } final TypedExpression coercedLeft; if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) { coercedLeft = coerceToString(left); } else { coercedLeft = left; } return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField); }
// Comparing a String left-hand side with a char literal must coerce the char to a String literal.
@Test public void charToString() { final TypedExpression left = expr(THIS_PLACEHOLDER, java.lang.String.class); final TypedExpression right = expr("\'x'", char.class); final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce(); final TypedExpression expected = new TypedExpression(new StringLiteralExpr("x"), String.class); assertThat(coerce.getCoercedRight()).isEqualTo(expected); }
// Points this binary map view at a serialized region: [int32 keyArrayBytes][key array][value array].
// The key array starts 8 bytes past offset (the length header is 8-byte aligned); the value array
// occupies the remainder. Asserts (debug-only) validate the sizes and that key/value element counts match.
public void pointTo(MemoryBuffer buf, int offset, int sizeInBytes) { this.buf = buf; this.baseOffset = offset; this.sizeInBytes = sizeInBytes; // Read the numBytes of key array from the aligned first 8 bytes as int. final int keyArrayBytes = buf.getInt32(offset); assert keyArrayBytes >= 0 : "keyArrayBytes (" + keyArrayBytes + ") should >= 0"; final int valueArrayBytes = sizeInBytes - keyArrayBytes - 8; assert valueArrayBytes >= 0 : "valueArraySize (" + valueArrayBytes + ") should >= 0"; keys.pointTo(buf, offset + 8, keyArrayBytes); values.pointTo(buf, offset + 8 + keyArrayBytes, valueArrayBytes); assert keys.numElements() == values.numElements(); }
// Builds a serialized map region by hand (key array after an 8-byte length header, then value array)
// and points a BinaryMap at it; passing is "no assertion fires".
// NOTE(review): the header is written with putInt64 while pointTo reads it with getInt32 — this only
// works if the buffer's layout makes the low 32 bits land at offset 0; confirm intended.
@Test public void pointTo() { MemoryBuffer buffer = MemoryUtils.buffer(1024); int writerIndex = 8; // preserve 8 byte for numBytes BinaryArrayWriter keyArrayWriter = new BinaryArrayWriter(DataTypes.arrayField(DataTypes.utf8())); keyArrayWriter.reset(2); keyArrayWriter.write(0, "k0"); keyArrayWriter.write(1, "k1"); buffer.copyFrom( writerIndex, keyArrayWriter.getBuffer(), keyArrayWriter.getStartIndex(), keyArrayWriter.size()); writerIndex += keyArrayWriter.size(); buffer.putInt64(0, keyArrayWriter.size()); BinaryArrayWriter valueArrayWriter = new BinaryArrayWriter(DataTypes.arrayField(DataTypes.utf8())); valueArrayWriter.reset(2); valueArrayWriter.write(0, "v0"); valueArrayWriter.write(1, "v1"); buffer.copyFrom( writerIndex, valueArrayWriter.getBuffer(), valueArrayWriter.getStartIndex(), valueArrayWriter.size()); writerIndex += valueArrayWriter.size(); BinaryMap map = new BinaryMap(DataTypes.mapField(DataTypes.utf8(), DataTypes.utf8())); map.pointTo(buffer, 0, writerIndex); // System.out.println(map); }
// Tears down the UI component attached to a menu entry: detaches the button's action, removes the
// component from its menu, and unregisters the entry's ActionEnabler from the action's property
// change listeners so the action is not leaked.
@Override public void visit(Entry target) { final EntryAccessor entryAccessor = new EntryAccessor(); final Component component = (Component) entryAccessor.removeComponent(target); if (component != null) { if(component instanceof AbstractButton) ((AbstractButton)component).setAction(null); removeMenuComponent(component); ActionEnabler actionEnabler = target.removeAttribute(ActionEnabler.class); if(actionEnabler != null){ final AFreeplaneAction action = entryAccessor.getAction(target); action.removePropertyChangeListener(actionEnabler); } } }
// After the component remover visits an entry whose menu item lives in an overflow submenu created
// by MenuSplitter(1), the parent popup must collapse back to a single component.
@Test public void removesExtraSubmenusFromParents() throws Exception { final JComponentRemover componentRemover = JComponentRemover.INSTANCE; final Entry entry = new Entry(); JMenu parent = new JMenu(); JComponent entryComponent = new JMenu(); final MenuSplitter menuSplitter = new MenuSplitter(1); menuSplitter.addMenuComponent(parent, new JMenu()); menuSplitter.addMenuComponent(parent, entryComponent); new EntryAccessor().setComponent(entry, entryComponent); componentRemover.visit(entry); assertThat(parent.getPopupMenu().getComponentCount(), equalTo(1)); }
/**
 * Resolves the bucket used for temporary writer blobs: the configured temporary bucket
 * when present, otherwise the final blob's own bucket.
 *
 * @param finalBlobIdentifier identifier of the final destination blob
 * @param options             file-system options that may name a temporary bucket
 * @return the bucket name to use for temporary blobs
 */
public static String getTemporaryBucketName(
        GSBlobIdentifier finalBlobIdentifier, GSFileSystemOptions options) {
    return options.getWriterTemporaryBucketName()
            .orElse(finalBlobIdentifier.bucketName);
}
// With WRITER_TEMPORARY_BUCKET_NAME configured, the temporary bucket overrides the blob's own bucket.
@Test public void shouldUseTemporaryBucketNameIfSpecified() { Configuration flinkConfig = new Configuration(); flinkConfig.set(GSFileSystemOptions.WRITER_TEMPORARY_BUCKET_NAME, "temp"); GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig); GSBlobIdentifier identifier = new GSBlobIdentifier("foo", "bar"); String bucketName = BlobUtils.getTemporaryBucketName(identifier, options); assertEquals("temp", bucketName); }
public static List<Object> getFieldValues(Collection<Field> fields, Object o) { List<Object> results = new ArrayList<>(fields.size()); for (Field field : fields) { // Platform.objectFieldOffset(field) can't handle primitive field. Object fieldValue = FieldAccessor.createAccessor(field).get(o); results.add(fieldValue); } return results; }
// getFieldValues must return the two field values of the test object in declared field order.
@Test public void testGetFieldValues() { GetFieldValuesTestClass o = new GetFieldValuesTestClass(); o.f1 = "str"; o.f2 = 10; List<Object> fieldValues = ReflectionUtils.getFieldValues(Descriptor.getFields(GetFieldValuesTestClass.class), o); assertEquals(fieldValues, ImmutableList.of("str", 10)); }
public static ClassLoader getDefaultClassLoader() { ClassLoader cl = null; try { cl = Thread.currentThread().getContextClassLoader(); } catch (Throwable ex) { // Cannot access thread context ClassLoader - falling back... } if (cl == null) { // No thread context class loader -> use class loader of this class. cl = ClassUtils.class.getClassLoader(); if (cl == null) { // getClassLoader() returning null indicates the bootstrap ClassLoader try { cl = ClassLoader.getSystemClassLoader(); } catch (Throwable ex) { // Cannot access system ClassLoader - oh well, maybe the caller can live with null... } } } return cl; }
// Even with a null thread-context ClassLoader, a non-null loader must be resolved via the fallbacks;
// the original context loader is restored in finally.
@Test void testGetDefaultClassLoader() { ClassLoader cachedClassLoader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(null); assertNotNull(ClassUtils.getDefaultClassLoader()); } finally { Thread.currentThread().setContextClassLoader(cachedClassLoader); } }
/**
 * Obtains connections for the given data source, delegating to the internal
 * {@code getConnections0} implementation.
 *
 * @throws SQLException if the underlying connection acquisition fails
 */
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName,
                                       final int connectionOffset, final int connectionSize,
                                       final ConnectionMode connectionMode) throws SQLException {
    return getConnections0(databaseName, dataSourceName, connectionOffset, connectionSize, connectionMode);
}
// With an empty connection cache, requesting one connection must return exactly one.
@Test void assertGetConnectionsWhenEmptyCache() throws SQLException { List<Connection> actual = databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY); assertThat(actual.size(), is(1)); }
/**
 * Decodes a Base62 string to text using the default charset.
 * Delegates to {@code decodeToString(source, DEFAULT_CHARSET)}.
 */
public static String decodeToString(String source) {
    return decodeToString(source, DEFAULT_CHARSET);
}
// Round-trip check: every (plain, encoded) pair in the naive test set must decode back to plain.
@Test void decodeToString() { getNaiveTestSet().forEach( (str, encoded) -> assertThat(Base62Utils.decodeToString(encoded)).isEqualTo(str)); }
/**
 * Resolves templates for every templated pipeline: validates the pipeline against its template
 * and, when validation produced no errors and the template has not been applied yet, applies it.
 */
@Override
public void process(CruiseConfig cruiseConfig) {
    for (PipelineConfig pipeline : cruiseConfig.getAllPipelineConfigs()) {
        if (!pipeline.hasTemplate()) {
            continue;
        }
        CaseInsensitiveString templateName = pipeline.getTemplateName();
        PipelineTemplateConfig template = cruiseConfig.findTemplate(templateName);
        pipeline.validateTemplate(template);
        // Only apply the template when validation passed and it was not applied already.
        if (pipeline.errors().isEmpty() && !pipeline.hasTemplateApplied()) {
            pipeline.usingTemplate(template);
        }
    }
}
// A pipeline that both references a template and defines its own stages must fail validation with
// errors on both "stages" and "template", and the template must NOT be applied.
@Test public void shouldValidatePipelineToCheckItDoesNotAllowBothTemplateAndStages() throws Exception { PipelineConfig pipelineConfig = new PipelineConfig(new CaseInsensitiveString("p"), new MaterialConfigs()); pipelineConfig.templatize(new CaseInsensitiveString("template")); pipelineConfig.addStageWithoutValidityAssertion(new StageConfig(new CaseInsensitiveString("stage"), new JobConfigs())); preprocessor.process(new BasicCruiseConfig(new BasicPipelineConfigs(pipelineConfig))); assertThat(pipelineConfig.hasTemplateApplied(), is(false)); assertThat(pipelineConfig.errors().on("stages"), is("Cannot add stages to pipeline 'p' which already references template 'template'")); assertThat(pipelineConfig.errors().on("template"), is("Cannot set template 'template' on pipeline 'p' because it already has stages defined")); }