focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Parses semantic-property annotations (forwarded / non-forwarded / read fields) for a
 * single-input function into {@code result}, delegating to the extended overload with
 * {@code skipIncompatibleTypes} set to {@code false}.
 *
 * @param result       properties object to populate (mutated in place)
 * @param forwarded    forwarded-field expressions, may be null
 * @param nonForwarded non-forwarded-field expressions, may be null
 * @param readSet      read-field expressions, may be null
 * @param inType       input type of the function
 * @param outType      output type of the function
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
/** Forwarded fields given as one semicolon-separated string (no "->" arrow syntax). */
@Test
void testForwardedNoArrowOneString() {
    // "f"-prefixed field names.
    String[] forwardedFields = {"f2;f3;f0"};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, fiveIntTupleType, fiveIntTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
    // Bare field indexes, without the "f" prefix.
    forwardedFields[0] = "2;3;0";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, fiveIntTupleType, fiveIntTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
    // A trailing semicolon must be tolerated.
    forwardedFields[0] = "2;3;0;";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, fiveIntTupleType, fiveIntTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
}
/**
 * Returns true if the namespace's current item configuration differs from its
 * latest active release.
 */
boolean isModified(Namespace namespace) {
    Release release = releaseService.findLatestActiveRelease(namespace);
    List<Item> items = itemService.findItemsWithoutOrdered(namespace.getId());
    // Never released yet: considered modified iff it has "normal" items (see hasNormalItems).
    if (release == null) {
        return hasNormalItems(items);
    }
    Map<String, String> releasedConfiguration = GSON.fromJson(release.getConfigurations(), GsonType.CONFIG);
    Map<String, String> configurationFromItems = generateConfigurationFromItems(namespace, items);
    // Any key/value difference between released config and current items means "modified".
    MapDifference<String, String> difference = Maps.difference(releasedConfiguration, configurationFromItems);
    return !difference.areEqual();
}
/** Child namespace whose items match its own latest release is reported as unmodified. */
@Test
public void testChildNamespaceNotModified() {
    long childNamespaceId = 1, parentNamespaceId = 2;
    Namespace childNamespace = createNamespace(childNamespaceId);
    Namespace parentNamespace = createNamespace(parentNamespaceId);
    Release childRelease = createRelease("{\"k1\":\"v3\", \"k2\":\"v2\"}");
    List<Item> childItems = Collections.singletonList(createItem("k1", "v3"));
    Release parentRelease = createRelease("{\"k1\":\"v1\", \"k2\":\"v2\"}");
    when(releaseService.findLatestActiveRelease(childNamespace)).thenReturn(childRelease);
    when(releaseService.findLatestActiveRelease(parentNamespace)).thenReturn(parentRelease);
    when(itemService.findItemsWithoutOrdered(childNamespaceId)).thenReturn(childItems);
    when(namespaceService.findParentNamespace(childNamespace)).thenReturn(parentNamespace);
    boolean isModified = namespaceUnlockAspect.isModified(childNamespace);
    Assert.assertFalse(isModified);
}
/**
 * Produces manual compaction tasks: takes all L1+ SST files sorted by column family
 * and start key, groups them into tasks, orders tasks by descending file count, and
 * caps the result at {@code settings.maxManualCompactions}.
 */
public List<CompactionTask> produce() {
    // get all CF files sorted by key range start (L1+)
    List<SstFileMetaData> sstSortedByCfAndStartingKeys =
            metadataSupplier.get().stream()
                    .filter(l -> l.level() > 0) // let RocksDB deal with L0
                    .sorted(SST_COMPARATOR)
                    .collect(Collectors.toList());
    LOG.trace("Input files: {}", sstSortedByCfAndStartingKeys.size());
    List<CompactionTask> tasks = groupIntoTasks(sstSortedByCfAndStartingKeys);
    // Largest tasks (most files) first, so the cap below keeps the most useful ones.
    tasks.sort(Comparator.<CompactionTask>comparingInt(t -> t.files.size()).reversed());
    return tasks.subList(0, Math.min(tasks.size(), settings.maxManualCompactions));
}
/** A single SST file should still yield at least one compaction task. */
@Test
void testSingleFile() {
    assertThat(produce(configBuilder().build(), sstBuilder().build())).isNotEmpty();
}
/**
 * Returns the state directory for the given task, creating the parent and task
 * directories if needed (only when persistent stores are enabled).
 *
 * @throws ProcessorStateException if a directory cannot be created, or an existing
 *         non-directory file occupies the task directory path
 */
public File getOrCreateDirectoryForTask(final TaskId taskId) {
    final File taskParentDir = getTaskDirectoryParentName(taskId);
    final File taskDir = new File(taskParentDir, StateManagerUtil.toTaskDirString(taskId));
    if (hasPersistentStores) {
        if (!taskDir.exists()) {
            synchronized (taskDirCreationLock) {
                // to avoid a race condition, we need to check again if the directory does not exist:
                // otherwise, two threads might pass the outer `if` (and enter the `then` block),
                // one blocks on `synchronized` while the other creates the directory,
                // and the blocking one fails when trying to create it after it's unblocked
                if (!taskParentDir.exists() && !taskParentDir.mkdir()) {
                    throw new ProcessorStateException(
                        String.format("Parent [%s] of task directory [%s] doesn't exist and couldn't be created",
                            taskParentDir.getPath(), taskDir.getPath()));
                }
                if (!taskDir.exists() && !taskDir.mkdir()) {
                    throw new ProcessorStateException(
                        String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath()));
                }
            }
        } else if (!taskDir.isDirectory()) {
            // Path exists but is a plain file: cannot use it as a state directory.
            throw new ProcessorStateException(
                String.format("state directory [%s] can't be created as there is an existing file with the same name",
                    taskDir.getPath()));
        }
    }
    return taskDir;
}
/** Task directories for named topologies live under per-topology "__name__" directories. */
@Test
public void shouldCreateTaskDirectoriesUnderNamedTopologyDirs() throws IOException {
    initializeStateDirectory(true, true);
    directory.getOrCreateDirectoryForTask(new TaskId(0, 0, "topology1"));
    directory.getOrCreateDirectoryForTask(new TaskId(0, 1, "topology1"));
    directory.getOrCreateDirectoryForTask(new TaskId(0, 0, "topology2"));
    assertThat(new File(appDir, "__topology1__").exists(), is(true));
    assertThat(new File(appDir, "__topology1__").isDirectory(), is(true));
    assertThat(new File(appDir, "__topology2__").exists(), is(true));
    assertThat(new File(appDir, "__topology2__").isDirectory(), is(true));
    assertThat(new File(new File(appDir, "__topology1__"), "0_0").exists(), is(true));
    assertThat(new File(new File(appDir, "__topology1__"), "0_0").isDirectory(), is(true));
    assertThat(new File(new File(appDir, "__topology1__"), "0_1").exists(), is(true));
    assertThat(new File(new File(appDir, "__topology1__"), "0_1").isDirectory(), is(true));
    assertThat(new File(new File(appDir, "__topology2__"), "0_0").exists(), is(true));
    assertThat(new File(new File(appDir, "__topology2__"), "0_0").isDirectory(), is(true));
}
/**
 * Builds a combined partition struct type that covers every partition spec the
 * table has ever had (union of all spec field ids).
 */
public static StructType partitionType(Table table) {
    Collection<PartitionSpec> specs = table.specs().values();
    return buildPartitionProjectionType("table partition", specs, allFieldIds(specs));
}
/** After spec evolution, the combined partition type contains fields from both specs. */
@Test
public void testPartitionTypeWithSpecEvolutionInV2Tables() {
    TestTables.TestTable table =
        TestTables.create(tableDir, "test", SCHEMA, BY_DATA_SPEC, V2_FORMAT_VERSION);
    table.updateSpec().removeField("data").addField("category").commit();
    assertThat(table.specs()).hasSize(2);
    StructType expectedType =
        StructType.of(
            NestedField.optional(1000, "data", Types.StringType.get()),
            NestedField.optional(1001, "category", Types.StringType.get()));
    StructType actualType = Partitioning.partitionType(table);
    assertThat(actualType).isEqualTo(expectedType);
}
/**
 * Loads and builds the configuration from the given source path.
 *
 * @throws ConfigurationParsingException if the source is empty or malformed
 * @throws IOException if the source cannot be read
 */
@Override
public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException {
    try (InputStream input = provider.open(requireNonNull(path))) {
        final JsonNode node = mapper.readTree(createParser(input));
        // readTree returns null for an empty input; treat that as a configuration error.
        if (node == null) {
            throw ConfigurationParsingException
                .builder("Configuration at " + path + " must not be empty")
                .build(path);
        }
        return build(node, path);
    } catch (JsonParseException e) {
        // Wrap parser failures so the user sees the format name, location and detail.
        throw ConfigurationParsingException
            .builder("Malformed " + formatName)
            .setCause(e)
            .setLocation(e.getLocation())
            .setDetail(e.getMessage())
            .build(path);
    }
}
/** Overriding an array element beyond the array's size must fail with a clear message. */
@Test
void throwsAnExceptionOnOverrideArrayPropertyIndexOutOfBounds() {
    System.setProperty("dw.servers[4].port", "9000");
    assertThatExceptionOfType(ArrayIndexOutOfBoundsException.class)
        .isThrownBy(() -> factory.build(configurationSourceProvider, validFile))
        .withMessageContaining("index is greater than size of array");
}
/**
 * Generates a QR code as a Base64 string, with the logo supplied as a Base64-encoded
 * image; decodes the logo and delegates to the byte[] overload.
 */
public static String generateAsBase64(String content, QrConfig qrConfig, String targetType, String logoBase64) {
    return generateAsBase64(content, qrConfig, targetType, Base64.decode(logoBase64));
}
/** Smoke test: generating a QR code as Base64 yields a non-null result. */
@Test
public void generateAsBase64Test() {
    final String base64 = QrCodeUtil.generateAsBase64("https://hutool.cn/", new QrConfig(400, 400), "png");
    Assert.notNull(base64);
}
/**
 * Copies the internal service id dimension to the public service id dimension,
 * when the internal one is present on the packet.
 */
@Override
public void process(MetricsPacket.Builder builder) {
    final String internalValue = builder.getDimensionValue(toDimensionId(INTERNAL_SERVICE_ID));
    if (internalValue == null) {
        return; // nothing to mirror
    }
    builder.putDimension(toDimensionId(SERVICE_ID), internalValue);
}
/** Packets carrying the internal service id get the public service id dimension added. */
@Test
public void new_service_id_is_added_when_internal_service_id_exists() {
    var builder = new MetricsPacket.Builder(toServiceId("foo"));
    builder.putDimension(toDimensionId(INTERNAL_SERVICE_ID), "service");
    var processor = new ServiceIdDimensionProcessor();
    processor.process(builder);
    assertTrue(builder.getDimensionIds().contains(NEW_ID_DIMENSION));
    assertEquals("service", builder.getDimensionValue(NEW_ID_DIMENSION));
}
/** @return the configured index version */
public int getVersion() {
    return _version;
}
/** JSON with a version field deserializes into an enabled config with that version. */
@Test
public void withSomeData() throws JsonProcessingException {
    String confStr = "{\n"
        + "  \"version\": 42\n"
        + "}";
    RangeIndexConfig config = JsonUtils.stringToObject(confStr, RangeIndexConfig.class);
    assertFalse(config.isDisabled(), "Unexpected disabled");
    assertEquals(config.getVersion(), 42, "Unexpected version");
}
/**
 * Masks secrets in the given exception's message and in its direct cause's message
 * (mutated in place), returning the same exception instance. Note: only the first
 * level of the cause chain is smudged.
 */
public Exception smudgedException(Exception rawException) {
    try {
        Throwable cause = rawException.getCause();
        if (cause != null) {
            smudgeException(cause);
        }
        smudgeException(rawException);
    } catch (Exception e) {
        // An unexpected failure while smudging is a programming error; escalate.
        ExceptionUtils.bomb(e);
    }
    return rawException;
}
/** Both the top exception's and its cause's messages are smudged, instances preserved. */
@Test
void shouldSmudgeExceptionMessagesForNestedExceptions() {
    List<CommandArgument> args = List.of(new StringArgument("foo"), new PasswordArgument("bar"));
    List<SecretString> secrets = List.of(new PasswordArgument("quux"));
    ConsoleResult result = new ConsoleResult(0,
        List.of(" foo ", " bar ", " baz ", " abc "),
        List.of(" quux ", " bang "), args, secrets);
    Exception innerException = new Exception("baz quux baz");
    Exception topException = new RuntimeException("foo bar abc", innerException);
    Exception exception = result.smudgedException(topException);
    assertThat(exception.getMessage()).isEqualTo("foo ****** abc");
    assertThat(exception).isSameAs(topException);
    assertThat(exception.getCause().getMessage()).isEqualTo("baz ****** baz");
    assertThat(exception.getCause()).isSameAs(innerException);
}
/**
 * Creates a step expression for the given step definition. If the definition takes
 * no parameters, any step argument (data table / doc string) is rejected; otherwise
 * the last parameter determines the target type and transposition.
 */
public StepExpression createExpression(StepDefinition stepDefinition) {
    List<ParameterInfo> parameterInfos = stepDefinition.parameterInfos();
    if (parameterInfos.isEmpty()) {
        return createExpression(
            stepDefinition.getPattern(), stepDefinitionDoesNotTakeAnyParameter(stepDefinition), false);
    }
    // By convention the last parameter receives the data table / doc string.
    ParameterInfo parameterInfo = parameterInfos.get(parameterInfos.size() - 1);
    return createExpression(
        stepDefinition.getPattern(), parameterInfo.getTypeResolver()::resolve, parameterInfo.isTransposed());
}
/** When the target type is unknown, the raw DataTable is passed through untransformed. */
@Test
void unknown_target_type_does_no_transform_data_table() {
    StepDefinition stepDefinition = new StubStepDefinition("Given some stuff:", UNKNOWN_TYPE);
    StepExpression expression = stepExpressionFactory.createExpression(stepDefinition);
    List<Argument> match = expression.match("Given some stuff:", table);
    assertThat(match.get(0).getValue(), is(equalTo(DataTable.create(table))));
}
/** Sorts the given array in ascending order (delegates with direction "ASC"). */
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
        description = "The array to sort") final List<T> input) {
    return arraySortWithDirection(input, "ASC");
}
/** Sorting is case-sensitive: uppercase strings order before lowercase (natural String order). */
@Test
public void shouldSortStringsMixedCase() {
    final List<String> input = Arrays.asList("foo", "Food", "bar", "Bar", "Baz");
    final List<String> output = udf.arraySortDefault(input);
    assertThat(output, contains("Bar", "Baz", "Food", "bar", "foo"));
}
@Override public List<KsqlPartitionLocation> locate( final List<KsqlKey> keys, final RoutingOptions routingOptions, final RoutingFilterFactory routingFilterFactory, final boolean isRangeScan ) { if (isRangeScan && keys.isEmpty()) { throw new IllegalStateException("Query is range scan but found no range keys."); } final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder(); final Set<Integer> filterPartitions = routingOptions.getPartitions(); final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty() : Optional.of(Sets.newHashSet(keys)); // Depending on whether this is a key-based lookup, determine which metadata method to use. // If we don't have keys, find the metadata for all partitions since we'll run the query for // all partitions of the state store rather than a particular one. //For issue #7174. Temporarily turn off metadata finding for a partition with keys //if there are more than one key. final List<PartitionMetadata> metadata; if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) { metadata = getMetadataForKeys(keys, filterPartitions); } else { metadata = getMetadataForAllPartitions(filterPartitions, keySet); } if (metadata.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Cannot determine which host contains the required partitions to serve the pull query. \n" + "The underlying persistent query may be restarting (e.g. as a result of " + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>."); LOG.debug(materializationException.getMessage()); throw materializationException; } // Go through the metadata and group them by partition. 
for (PartitionMetadata partitionMetadata : metadata) { LOG.debug("Handling pull query for partition {} of state store {}.", partitionMetadata.getPartition(), storeName); final HostInfo activeHost = partitionMetadata.getActiveHost(); final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts(); final int partition = partitionMetadata.getPartition(); final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys(); LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition); // For a given partition, find the ordered, filtered list of hosts to consider final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory, activeHost, standByHosts, partition); partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts)); } return partitionLocations.build(); }
/** Range scans with keys must consult all partitions, with per-partition host ordering. */
@Test
public void shouldFindAllPartitionsWithKeysAndRangeScan() {
    // Given:
    when(topology.describe()).thenReturn(description);
    when(description.subtopologies()).thenReturn(ImmutableSet.of(sub1));
    when(sub1.nodes()).thenReturn(ImmutableSet.of(source, processor));
    when(source.topicSet()).thenReturn(ImmutableSet.of(TOPIC_NAME));
    when(processor.stores()).thenReturn(ImmutableSet.of(STORE_NAME));
    when(kafkaStreams.streamsMetadataForStore(any()))
        .thenReturn(ImmutableList.of(HOST1_STREAMS_MD1, HOST1_STREAMS_MD2, HOST1_STREAMS_MD3));
    // When:
    final List<KsqlPartitionLocation> result = locator.locate(
        ImmutableList.of(KEY), routingOptions, routingFilterFactoryStandby, true);
    // Then: three partitions, each with active and standby hosts in filtered order.
    assertThat(result.size(), is(3));
    int partition = result.get(0).getPartition();
    assertThat(partition, is(0));
    List<KsqlNode> nodeList = result.get(0).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.get(0), is(activeNode));
    assertThat(nodeList.get(1), is(standByNode1));
    assertThat(nodeList.get(2), is(standByNode2));
    partition = result.get(1).getPartition();
    assertThat(partition, is(1));
    nodeList = result.get(1).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.get(0), is(standByNode1));
    assertThat(nodeList.get(1), is(activeNode));
    assertThat(nodeList.get(2), is(standByNode2));
    partition = result.get(2).getPartition();
    assertThat(partition, is(2));
    nodeList = result.get(2).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.get(0), is(standByNode2));
    assertThat(nodeList.get(1), is(activeNode));
    assertThat(nodeList.get(2), is(standByNode1));
}
/**
 * Returns the tenant id owning the given virtual network.
 * Throws NullPointerException (via checkNotNull) if the network does not exist.
 */
@Override
public TenantId getTenantId(NetworkId networkId) {
    final VirtualNetwork network = getVirtualNetwork(networkId);
    checkNotNull(network, "The network does not exist.");
    return network.tenantId();
}
/** Looking up the tenant of a null network id must throw NullPointerException. */
@Test(expected = NullPointerException.class)
public void testGetTenantIdForNullVirtualNetwork() {
    manager.getTenantId(null);
}
/**
 * Creates a Couchbase endpoint for the given URI and binds the URI query
 * parameters onto the endpoint's typed properties.
 */
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters)
        throws Exception {
    CouchbaseEndpoint endpoint = new CouchbaseEndpoint(uri, remaining, this);
    setProperties(endpoint, parameters);
    return endpoint;
}
/** Whitespace around additional hosts is trimmed when building bootstrap URIs. */
@Test
public void testCouchbaseAdditionalHostsWithSpaces() throws Exception {
    Map<String, Object> params = new HashMap<>();
    params.put("additionalHosts", " 127.0.0.1, example.com, another-host ");
    params.put("bucket", "bucket");
    String uri = "couchbase:http://localhost";
    String remaining = "http://localhost";
    CouchbaseEndpoint endpoint = new CouchbaseComponent(context).createEndpoint(uri, remaining, params);
    URI[] endpointArray = endpoint.makeBootstrapURI();
    assertEquals(new URI("http://localhost:8091/pools"), endpointArray[0]);
    assertEquals(new URI("http://127.0.0.1:8091/pools"), endpointArray[1]);
    assertEquals(new URI("http://example.com:8091/pools"), endpointArray[2]);
    assertEquals(new URI("http://another-host:8091/pools"), endpointArray[3]);
    assertEquals(4, endpointArray.length);
}
/**
 * Iteratively visits a schema graph (depth-first, cycle-safe) with the given visitor.
 * Non-terminal schemas (array/record/union/map) get a pre-visit, their children are
 * pushed, and a post-visit action is executed afterwards; already-seen schemas are
 * treated as terminals. Returns {@code visitor.get()}.
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of visited schemas, keyed by identity so structurally-equal schemas stay distinct.
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the Schemas to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using Either<...> has a cost we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.push(start);
    Object current;
    while ((current = dq.poll()) != null) {
        if (current instanceof Supplier) {
            // We are executing a non-terminal post visit.
            SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
            switch (action) {
            case CONTINUE:
                break;
            case SKIP_SIBLINGS:
                // Drop remaining sibling schemas until the next post-visit marker.
                while (dq.peek() instanceof Schema) {
                    dq.remove();
                }
                break;
            case TERMINATE:
                return visitor.get();
            case SKIP_SUBTREE:
            default:
                // SKIP_SUBTREE makes no sense after the subtree was already visited.
                throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (visited.containsKey(schema)) {
                // Already visited (cycle or diamond): treat as a terminal node.
                terminate = visitTerminal(visitor, schema, dq);
            } else {
                Schema.Type type = schema.getType();
                switch (type) {
                case ARRAY:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                    visited.put(schema, schema);
                    break;
                case RECORD:
                    // Push fields in reverse so they are visited in declaration order.
                    terminate = visitNonTerminal(visitor, schema, dq,
                        () -> schema.getFields().stream().map(Field::schema)
                            .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                    visited.put(schema, schema);
                    break;
                case UNION:
                    terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                    visited.put(schema, schema);
                    break;
                case MAP:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                    visited.put(schema, schema);
                    break;
                default:
                    // Primitive / named terminal types.
                    terminate = visitTerminal(visitor, schema, dq);
                    break;
                }
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
/** Nested record: visitor sees outer record, then nested record, then terminates ("!"). */
@Test
public void testVisit6() {
    String s6 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
        + "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"ss2\", \"fields\": "
        + "[{\"name\": \"f11\", \"type\": \"int\"}]}},"
        + "{\"name\": \"f2\", \"type\": \"long\"}"
        + "]}";
    Assert.assertEquals("c1.ss2.!", Schemas.visit(new Schema.Parser().parse(s6), new TestVisitor()));
}
/**
 * Executes the given work under the global-lock flag, binding the flag and the
 * executor's lock config for the duration and restoring the previous state afterwards.
 * The flag is unbound only by the outermost (root) caller so nested calls keep it.
 */
public Object execute(GlobalLockExecutor executor) throws Throwable {
    boolean alreadyInGlobalLock = RootContext.requireGlobalLock();
    if (!alreadyInGlobalLock) {
        RootContext.bindGlobalLockFlag();
    }

    // set my config to config holder so that it can be access in further execution
    // for example, LockRetryController can access it with config holder
    GlobalLockConfig myConfig = executor.getGlobalLockConfig();
    GlobalLockConfig previousConfig = GlobalLockConfigHolder.setAndReturnPrevious(myConfig);

    try {
        return executor.execute();
    } finally {
        // only unbind when this is the root caller.
        // otherwise, the outer caller would lose global lock flag
        if (!alreadyInGlobalLock) {
            RootContext.unbindGlobalLockFlag();
        }

        // if previous config is not null, we need to set it back
        // so that the outer logic can still use their config
        if (previousConfig != null) {
            GlobalLockConfigHolder.setAndReturnPrevious(previousConfig);
        } else {
            GlobalLockConfigHolder.remove();
        }
    }
}
/** During execution the global-lock flag is bound and the caller's config is visible. */
@Test
void testSingle() {
    assertDoesNotThrow(() -> {
        template.execute(new GlobalLockExecutor() {
            @Override
            public Object execute() {
                assertTrue(RootContext.requireGlobalLock(), "fail to bind global lock flag");
                assertSame(config1, GlobalLockConfigHolder.getCurrentGlobalLockConfig(),
                    "global lock config changed during execution");
                return null;
            }

            @Override
            public GlobalLockConfig getGlobalLockConfig() {
                return config1;
            }
        });
    });
}
/** @return a new output stream that appends to this binary stream */
@Override
public OutputStream getOutputStream() {
    return new RedissonOutputStream();
}
/** Writing byte-array slices (offset/length) appends exactly those bytes to the stream. */
@Test
public void testWriteArrayWithOffset() throws IOException {
    RBinaryStream stream = redisson.getBinaryStream("test");
    OutputStream os = stream.getOutputStream();
    byte[] value = {1, 2, 3, 4, 5, 6};
    os.write(value, 0, 3);
    byte[] s = stream.get();
    assertThat(s).isEqualTo(new byte[] {1, 2, 3});
    os.write(value, 3, 3);
    s = stream.get();
    assertThat(s).isEqualTo(value);
}
/**
 * Windowed left join with another stream; adapts the key-less joiner to a
 * key-aware one and delegates to the main overload.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                        final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                        final JoinWindows windows) {
    return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
/** A null ValueJoiner on a table left join must be rejected with an explicit NPE. */
@Test
public void shouldNotAllowNullValueJoinerOnTableLeftJoin() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.leftJoin(testTable, (ValueJoiner<? super String, ? super String, ?>) null));
    assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
/**
 * Finds the first binding table rule matching any of the given logic table names,
 * or empty when none of them has one.
 */
private Optional<BindingTableRule> findBindingTableRule(final Collection<String> logicTableNames) {
    return logicTableNames.stream()
            .map(this::findBindingTableRule)
            .filter(Optional::isPresent)
            .findFirst()
            .orElseGet(Optional::empty);
}
/** An unknown logic table name ("new_Table") yields no binding table rule. */
@Test
void assertGetBindingTableRuleForNotFound() {
    assertFalse(createMaximumShardingRule().findBindingTableRule("new_Table").isPresent());
}
/**
 * Replicates a heartbeat to the peer node. Priming requests are sent synchronously
 * and their result ignored; otherwise the heartbeat is enqueued as a batched
 * replication task. A 404 from the peer triggers re-registration of the instance;
 * other failures may trigger a timestamp-based sync if configured.
 */
public void heartbeat(final String appName, final String id,
                      final InstanceInfo info, final InstanceStatus overriddenStatus,
                      boolean primeConnection) throws Throwable {
    if (primeConnection) {
        // We do not care about the result for priming request.
        replicationClient.sendHeartBeat(appName, id, info, overriddenStatus);
        return;
    }
    ReplicationTask replicationTask = new InstanceReplicationTask(targetHost, Action.Heartbeat, info, overriddenStatus, false) {
        @Override
        public EurekaHttpResponse<InstanceInfo> execute() throws Throwable {
            return replicationClient.sendHeartBeat(appName, id, info, overriddenStatus);
        }

        @Override
        public void handleFailure(int statusCode, Object responseEntity) throws Throwable {
            super.handleFailure(statusCode, responseEntity);
            if (statusCode == 404) {
                // Peer does not know this instance: replicate it via registration.
                logger.warn("{}: missing entry.", getTaskName());
                if (info != null) {
                    logger.warn("{}: cannot find instance id {} and hence replicating the instance with status {}",
                            getTaskName(), info.getId(), info.getStatus());
                    register(info);
                }
            } else if (config.shouldSyncWhenTimestampDiffers()) {
                InstanceInfo peerInstanceInfo = (InstanceInfo) responseEntity;
                if (peerInstanceInfo != null) {
                    syncInstancesIfTimestampDiffers(appName, id, info, peerInstanceInfo);
                }
            }
        }
    };
    // A heartbeat older than one lease-renewal interval is useless; let it expire.
    long expiryTime = System.currentTimeMillis() + getLeaseRenewalOf(info);
    batchingDispatcher.process(taskId("heartbeat", info), replicationTask, expiryTime);
}
/** Non-priming heartbeats are dispatched as a single batched Heartbeat replication. */
@Test
public void testHeartbeatBatchReplication() throws Throwable {
    createPeerEurekaNode().heartbeat(instanceInfo.getAppName(), instanceInfo.getId(), instanceInfo, null, false);
    ReplicationInstance replicationInstance = expectSingleBatchRequest();
    assertThat(replicationInstance.getAction(), is(equalTo(Action.Heartbeat)));
}
/**
 * Reflectively instantiates the YAML processor implementation. Reflection failures
 * become a LinkageError (dependency problem); exceptions thrown by the constructor
 * itself are unwrapped and rethrown as-is when runtime/error, otherwise wrapped.
 */
public static YamlProcessor create(boolean withResolver) {
    try {
        return CONSTRUCTOR.newInstance(withResolver);
    } catch (final IllegalAccessException | IllegalArgumentException | InstantiationException ex) {
        throw new LinkageError("Dependencies for Yaml are not loaded correctly: " + CLASS_NAME, ex);
    } catch (final InvocationTargetException ex) {
        final Throwable targetException = ex.getTargetException();
        if (targetException instanceof RuntimeException) {
            throw (RuntimeException) targetException;
        } else if (targetException instanceof Error) {
            throw (Error) targetException;
        } else {
            throw new RuntimeException("Unexpected exception in creating: " + CLASS_NAME, ex);
        }
    }
}
/** Both resolver modes must produce the concrete YamlProcessorImpl. */
@Test
public void create() {
    assertTrue(YamlProcessor.create(false) instanceof org.embulk.deps.config.YamlProcessorImpl);
    assertTrue(YamlProcessor.create(true) instanceof org.embulk.deps.config.YamlProcessorImpl);
}
/**
 * Returns a URL for the endpoint: used verbatim if it already carries a scheme
 * starting with "http" (http/https), otherwise prefixed with "https://".
 */
static String urlFor(String endpoint) {
    return endpoint.startsWith("http") ? endpoint : "https://" + endpoint;
}
/** Bare endpoints get an https:// prefix; http/https endpoints pass through unchanged. */
@Test
public void urlFor() {
    assertEquals("https://some-endpoint", AwsRequestUtils.urlFor("some-endpoint"));
    assertEquals("https://some-endpoint", AwsRequestUtils.urlFor("https://some-endpoint"));
    assertEquals("http://some-endpoint", AwsRequestUtils.urlFor("http://some-endpoint"));
}
/**
 * Routes the invocation: broadcast and point-to-point calls always go remote via the
 * original invoker; services exported in the local JVM are invoked in-process;
 * everything else falls back to the original (remote) invoker.
 */
@Override
public Result invoke(Invocation invocation) throws RpcException {
    // When broadcasting, it should be called remotely.
    if (isBroadcast()) {
        if (logger.isDebugEnabled()) {
            logger.debug("Performing broadcast call for method: " + RpcUtils.getMethodName(invocation)
                + " of service: " + getUrl().getServiceKey());
        }
        return invoker.invoke(invocation);
    }
    if (peerFlag) {
        if (logger.isDebugEnabled()) {
            logger.debug("Performing point-to-point call for method: " + RpcUtils.getMethodName(invocation)
                + " of service: " + getUrl().getServiceKey());
        }
        // If it's a point-to-point direct connection, invoke the original Invoker
        return invoker.invoke(invocation);
    }
    if (isInjvmExported()) {
        if (logger.isDebugEnabled()) {
            logger.debug("Performing local JVM call for method: " + RpcUtils.getMethodName(invocation)
                + " of service: " + getUrl().getServiceKey());
        }
        // If it's exported to the local JVM, invoke the corresponding Invoker
        return injvmInvoker.invoke(invocation);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Performing remote call for method: " + RpcUtils.getMethodName(invocation)
            + " of service: " + getUrl().getServiceKey());
    }
    // Otherwise, delegate the invocation to the original Invoker
    return invoker.invoke(invocation);
}
/** An injvm:// URL routes the call through the local JVM invoker. */
@Test
void testInjvmUrlInvoke() {
    URL url = URL.valueOf("injvm://1.2.3.4/" + DemoService.class.getName());
    url = url.addParameter(REFER_KEY, URL.encode(PATH_KEY + "=" + DemoService.class.getName()));
    url = url.setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
    Invoker<DemoService> cluster = getClusterInvoker(url);
    invokers.add(cluster);
    // Configured with mock
    RpcInvocation invocation = new RpcInvocation();
    invocation.setMethodName("doSomething7");
    invocation.setParameterTypes(new Class[] {});
    Result ret = cluster.invoke(invocation);
    Assertions.assertEquals("doSomething7", ret.getValue());
}
/**
 * Shepard (inverse distance weighted) interpolation at the query point x:
 * a weighted average of the sample values, returning the exact sample value
 * when x coincides with a data point.
 *
 * @param x query point; must have the same dimension as the training points
 * @throws IllegalArgumentException on dimension mismatch
 */
public double interpolate(double... x) {
    if (x.length != this.x[0].length) {
        throw new IllegalArgumentException(String.format("Invalid input vector size: %d, expected: %d", x.length, this.x[0].length));
    }

    double weight = 0.0, sum = 0.0;
    for (int i = 0; i < this.x.length; i++) {
        // r is the SQUARED distance, so r^(p/2) corresponds to distance^p.
        double r = MathEx.squaredDistance(x, this.x[i]);
        // Exact hit: avoid division by zero and return the sample value directly.
        if (r == 0.0) {
            return y[i];
        }
        // NOTE(review): assumes the field p is stored so that r^(p/2) yields an
        // INVERSE distance weight (i.e. p is negative) — confirm against the constructor.
        double w = Math.pow(r, p/2);
        weight += w;
        sum += w * y[i];
    }

    return sum / weight;
}
/** Interpolation reproduces sample values exactly and is symmetric at the midpoint. */
@Test
public void testInterpolate() {
    System.out.println("interpolate");
    double[][] x = {{0, 0}, {1, 1}};
    double[] y = {0, 1};
    ShepardInterpolation instance = new ShepardInterpolation(x, y);
    double[] x1 = {0.5, 0.5};
    assertEquals(0, instance.interpolate(x[0]), 1E-7);
    assertEquals(1, instance.interpolate(x[1]), 1E-7);
    assertEquals(0.5, instance.interpolate(x1), 1E-7);
}
/** Stores the task profiling parameter string under {@code JobContext.TASK_PROFILE_PARAMS}. */
public void setProfileParams(String value) {
    set(JobContext.TASK_PROFILE_PARAMS, value);
}
/** The profile-params setter writes to the MRJobConfig.TASK_PROFILE_PARAMS key. */
@Test
public void testProfileParamsSetter() {
    JobConf configuration = new JobConf();
    configuration.setProfileParams("test");
    Assert.assertEquals("test", configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
}
/**
 * Recomputes the capacity usage counter for the given tenant as of now.
 * NOTE(review): the boolean result of correctUsage is discarded — confirm that
 * a failed correction is acceptable to ignore here.
 */
public void correctTenantUsage(String tenant) {
    tenantCapacityPersistService.correctUsage(tenant, TimeUtils.getCurrentTime());
}
/** Correcting tenant usage delegates exactly once to the persist service. */
@Test
void testCorrectTenantUsage() {
    when(tenantCapacityPersistService.correctUsage(eq("testTenant"), any())).thenReturn(true);
    service.correctTenantUsage("testTenant");
    Mockito.verify(tenantCapacityPersistService, times(1)).correctUsage(eq("testTenant"), any());
}
/**
 * Reads as many complete HTTP/2 frames as the input buffer allows, alternating
 * between header parsing and payload processing. After a connection-level error
 * the reader drains and discards all further input.
 *
 * @throws Http2Exception on protocol violations (stream errors do not poison the reader)
 */
@Override
public void readFrame(ChannelHandlerContext ctx, ByteBuf input, Http2FrameListener listener)
        throws Http2Exception {
    if (readError) {
        // Connection already failed: discard everything.
        input.skipBytes(input.readableBytes());
        return;
    }
    try {
        do {
            if (readingHeaders && !preProcessFrame(input)) {
                return;
            }
            // The header is complete, fall into the next case to process the payload.
            // This is to ensure the proper handling of zero-length payloads. In this
            // case, we don't want to loop around because there may be no more data
            // available, causing us to exit the loop. Instead, we just want to perform
            // the first pass at payload processing now.

            // Wait until the entire payload has been read.
            if (input.readableBytes() < payloadLength) {
                return;
            }
            // Slice to work only on the frame being read
            ByteBuf framePayload = input.readSlice(payloadLength);
            // We have consumed the data for this frame, next time we read,
            // we will be expecting to read a new frame header.
            readingHeaders = true;
            verifyFrameState();
            processPayloadState(ctx, framePayload, listener);
        } while (input.isReadable());
    } catch (Http2Exception e) {
        // Only connection errors poison the reader; stream errors are recoverable.
        readError = !Http2Exception.isStreamError(e);
        throw e;
    } catch (RuntimeException e) {
        readError = true;
        throw e;
    } catch (Throwable cause) {
        readError = true;
        PlatformDependent.throwException(cause);
    }
}
/** A DATA frame on stream 0 (no associated stream) must be rejected. */
@Test
public void failedWhenDataFrameNotAssociateWithStream() throws Http2Exception {
    final ByteBuf input = Unpooled.buffer();
    ByteBuf payload = Unpooled.buffer();
    try {
        payload.writeByte(1);
        writeFrameHeader(input, payload.readableBytes(), DATA, new Http2Flags().endOfStream(true), 0);
        input.writeBytes(payload);
        assertThrows(Http2Exception.class, new Executable() {
            @Override
            public void execute() throws Throwable {
                frameReader.readFrame(ctx, input, listener);
            }
        });
    } finally {
        payload.release();
        input.release();
    }
}
/**
 * Creates a pre-signed S3 URL for the given bucket/key, valid until {@code expiry}
 * (epoch millis, converted to seconds for signing). For AWS with V4 signatures a
 * default region is substituted when none is given.
 */
public String create(final String secret, final String bucket, String region, final String key,
                     final String method, final long expiry) {
    if(StringUtils.isBlank(region)) {
        // Only for AWS
        switch(session.getSignatureVersion()) {
            case AWS4HMACSHA256:
                // Region is required for AWS4-HMAC-SHA256 signature
                region = S3LocationFeature.DEFAULT_REGION.getIdentifier();
        }
    }
    final Host bookmark = session.getHost();
    return new RestS3Service(new AWSCredentials(StringUtils.strip(bookmark.getCredentials().getUsername()), StringUtils.strip(secret))) {
        @Override
        public String getEndpoint() {
            // AWS hostnames sign against the protocol's default endpoint; others use the host as-is.
            if(S3Session.isAwsHostname(bookmark.getHostname())) {
                return bookmark.getProtocol().getDefaultHostname();
            }
            return bookmark.getHostname();
        }

        @Override
        protected void initializeProxy(final HttpClientBuilder httpClientBuilder) {
            //
        }
    }.createSignedUrlUsingSignatureVersion(
        session.getSignatureVersion().toString(),
        region, method, bucket, key, null, null,
        expiry / 1000, false, true,
        new HostPreferences(bookmark).getBoolean("s3.bucket.virtualhost.disable"));
}
/** With virtual-host bucket naming disabled, the bucket appears in the path, not the host. */
@Test
public void testDnsBucketNamingDisabled() {
    final Host host = new Host(new S3Protocol(), new S3Protocol().getDefaultHostname(), new Credentials(
        PROPERTIES.get("s3.key"), PROPERTIES.get("s3.secret")
    )) {
        @Override
        public String getProperty(final String key) {
            if("s3.bucket.virtualhost.disable".equals(key)) {
                return String.valueOf(true);
            }
            return super.getProperty(key);
        }
    };
    final S3Session session = new S3Session(host);
    final Calendar expiry = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    expiry.add(Calendar.MILLISECOND, (int) TimeUnit.DAYS.toMillis(7));
    final String url = new S3PresignedUrlProvider(session).create(PROPERTIES.get("s3.secret"),
        "test-bucket", "region", "f", "GET", expiry.getTimeInMillis());
    assertNotNull(url);
    assertEquals("s3.amazonaws.com", URI.create(url).getHost());
    assertEquals("/test-bucket/f", URI.create(url).getPath());
}
@Bean("ProjectBranches")
public ProjectBranches provide(@Nullable ProjectBranchesLoader loader, ScannerProperties scannerProperties) {
    // Without a loader, branch support is unavailable: fall back to an empty list.
    if (loader == null) {
        return new ProjectBranches(Collections.emptyList());
    }
    // Time the remote load so it shows up in the scanner profile.
    final Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG);
    final ProjectBranches loadedBranches = loader.load(scannerProperties.getProjectKey());
    profiler.stopInfo();
    return loadedBranches;
}
@Test
public void should_use_loader() {
    // Given a loader stubbed for the project key
    final String projectKey = "key";
    when(scannerProperties.getProjectKey()).thenReturn(projectKey);
    when(mockLoader.load(projectKey)).thenReturn(mockBranches);

    // When branches are provided
    final ProjectBranches provided = provider.provide(mockLoader, scannerProperties);

    // Then the loader's result is returned as-is
    assertThat(provided).isSameAs(mockBranches);
}
public ClientTpcConfig setConnectionCount(int connectionCount) {
    // Validate first: checkNotNegative throws IllegalArgumentException for
    // negative values and returns the argument otherwise.
    final int validatedCount = checkNotNegative(connectionCount, "connectionCount");
    this.connectionCount = validatedCount;
    return this;
}
@Test
public void test_setConnectionCount_whenNegative() {
    // A negative connection count must be rejected up front.
    final ClientTpcConfig config = new ClientTpcConfig();
    final int negativeCount = -1;
    assertThrows(IllegalArgumentException.class, () -> config.setConnectionCount(negativeCount));
}
public void replay(AccessControlEntryRecord record) {
    // Convert the record into an id-keyed ACL and stage it as an addition in the delta.
    final StandardAclWithId aclWithId = StandardAclWithId.fromRecord(record);
    final Uuid aclId = aclWithId.id();
    changes.put(aclId, Optional.of(aclWithId.acl()));
}
/**
 * Replaying a removal record against a delta whose image already contains ACLs
 * must fail with IllegalStateException.
 * NOTE(review): the removal record comes from testRemoveAccessControlEntryRecord()
 * (not visible here) — presumably it references an id other than the one seeded
 * into the image; confirm against that helper.
 */
@Test
public void testThrowsExceptionOnInvalidStateWhenImageHasOtherAcls() {
    Uuid id = Uuid.fromString("nGiNMQHwRgmgsIlfu73aJQ");
    // Build a fully-populated ACL record keyed by the id above
    AccessControlEntryRecord record = new AccessControlEntryRecord();
    record.setId(id);
    record.setResourceType((byte) 1);
    record.setResourceName("foo");
    record.setPatternType((byte) 1);
    record.setPrincipal("User:user");
    record.setHost("host");
    record.setOperation((byte) 1);
    record.setPermissionType((byte) 1);
    Map<Uuid, StandardAcl> initialImageMap = new HashMap<>();
    initialImageMap.put(id, StandardAcl.fromRecord(record));
    AclsImage image = new AclsImage(initialImageMap);
    AclsDelta delta = new AclsDelta(image);
    RemoveAccessControlEntryRecord removeAccessControlEntryRecord = testRemoveAccessControlEntryRecord();
    assertThrows(IllegalStateException.class, () -> delta.replay(removeAccessControlEntryRecord));
}
@Override
public boolean test(final Path test) {
    // Two paths match when their simple predicates compare equal.
    final SimplePathPredicate candidate = new SimplePathPredicate(test);
    return this.equals(candidate);
}
@Test
public void testCollision() {
    // "2R" and "33" are a classic String hash collision; the predicate must
    // still distinguish the two directories.
    final Path first = new Path("/d/2R", EnumSet.of(Path.Type.directory));
    final Path second = new Path("/d/33", EnumSet.of(Path.Type.directory));
    assertFalse(new SimplePathPredicate(first).test(second));
}
@Override
public synchronized void put(final Bytes key, final byte[] value) {
    // Namespace the logical key before delegating to the shared physical store.
    final Bytes prefixedKey = prefixKeyFormatter.addPrefix(key);
    physicalStore.put(prefixedKey, value);
}
/**
 * Puts must be isolated per segment: a key written to one segment (including
 * the negative-id segment) is invisible from the others, while the same key
 * may hold a different value in each segment.
 */
@Test
public void shouldPut() {
    final KeyValue<String, String> sharedKeyV1 = new KeyValue<>("shared", "v1");
    final KeyValue<String, String> sharedKeyV2 = new KeyValue<>("shared", "v2");
    final KeyValue<String, String> sharedKeyV3 = new KeyValue<>("shared", "v3");
    final KeyValue<String, String> segment0KeyOnly = new KeyValue<>("segment0_only", "foo");
    final KeyValue<String, String> segment1KeyOnly = new KeyValue<>("segment1_only", "bar");
    final KeyValue<String, String> negativeSegmentKeyOnly = new KeyValue<>("negative_segment_only", "baz");
    // Write the shared key to all three segments, plus one private key each
    segment0.put(new Bytes(serializeBytes(sharedKeyV1.key)), serializeBytes(sharedKeyV1.value));
    segment0.put(new Bytes(serializeBytes(segment0KeyOnly.key)), serializeBytes(segment0KeyOnly.value));
    segment1.put(new Bytes(serializeBytes(sharedKeyV2.key)), serializeBytes(sharedKeyV2.value));
    segment1.put(new Bytes(serializeBytes(segment1KeyOnly.key)), serializeBytes(segment1KeyOnly.value));
    negativeIdSegment.put(new Bytes(serializeBytes(sharedKeyV3.key)), serializeBytes(sharedKeyV3.value));
    negativeIdSegment.put(new Bytes(serializeBytes(negativeSegmentKeyOnly.key)), serializeBytes(negativeSegmentKeyOnly.value));
    // The shared key resolves to a different value per segment
    assertEquals("v1", getAndDeserialize(segment0, "shared"));
    assertEquals("v2", getAndDeserialize(segment1, "shared"));
    assertEquals("v3", getAndDeserialize(negativeIdSegment, "shared"));
    // Segment-private keys are not visible from the other segments
    assertEquals("foo", getAndDeserialize(segment0, "segment0_only"));
    assertNull(getAndDeserialize(segment1, "segment0_only"));
    assertNull(getAndDeserialize(negativeIdSegment, "segment0_only"));
    assertNull(getAndDeserialize(segment0, "segment1_only"));
    assertEquals("bar", getAndDeserialize(segment1, "segment1_only"));
    assertNull(getAndDeserialize(negativeIdSegment, "segment1_only"));
    assertNull(getAndDeserialize(segment0, "negative_segment_only"));
    assertNull(getAndDeserialize(segment1, "negative_segment_only"));
    assertEquals("baz", getAndDeserialize(negativeIdSegment, "negative_segment_only"));
}
@Override
public void updateInstanceStatus(String status) {
    // Pure delegation to the wrapped registration client.
    client.updateInstanceStatus(status);
}
/** Verifies the status update is delegated to the underlying client exactly once. */
@Test
public void updateInstanceStatus() {
    scRegister.updateInstanceStatus(status);
    Mockito.verify(scClient, Mockito.times(1)).updateInstanceStatus(status);
}
/**
 * Wraps the given runnable in a {@link PriorityFutureTask} and schedules it
 * for execution on this executor.
 *
 * @param task the priority-aware runnable to execute; must not be null
 * @return the future tracking the task's completion (result is always null)
 * @throws NullPointerException if {@code task} is null
 */
public PriorityFutureTask<Void> submit(PriorityRunnable task) {
    if (task == null) {
        throw new NullPointerException("task");
    }
    // Fix: use the typed constructor instead of the raw type, which caused an
    // unchecked-conversion warning without changing behavior.
    final PriorityFutureTask<Void> ftask = new PriorityFutureTask<>(task, null);
    execute(ftask);
    return ftask;
}
/**
 * With a single worker thread and all tasks at the same priority, execution
 * must follow submission (FIFO) order. Each task appends "priority@index" to
 * the shared buffer.
 */
@Test
public void testSamePriority() throws InterruptedException, ExecutionException {
    PriorityBlockingQueue<Runnable> workQueue = new PriorityBlockingQueue<Runnable>(1000);
    PriorityThreadPoolExecutor pool = new PriorityThreadPoolExecutor(1, 1, 1, TimeUnit.MINUTES, workQueue);
    // Hold the first task so all ten queue up before any of them completes
    sleepFlag.set(true);
    Future[] futures = new Future[10];
    StringBuffer buffer = new StringBuffer();
    for (int i = 0; i < futures.length; i++) {
        futures[i] = pool.submit(new TenSecondTask(i, 1, buffer));
    }
    sleepFlag.set(false);
    // Wait for every task to finish before inspecting the buffer
    for (int i = 0; i < futures.length; i++) {
        futures[i].get();
    }
    // Same priority (01) for all tasks => indexes appear in FIFO order 00..09
    assertEquals("01@00, 01@01, 01@02, 01@03, 01@04, 01@05, 01@06, 01@07, 01@08, 01@09, ", buffer.toString());
}
Set<org.apache.parquet.column.Encoding> fromFormatEncodings(List<Encoding> encodings) { Set<org.apache.parquet.column.Encoding> converted = new HashSet<org.apache.parquet.column.Encoding>(); for (Encoding encoding : encodings) { converted.add(getEncoding(encoding)); } // make converted unmodifiable, drop reference to modifiable copy converted = Collections.unmodifiableSet(converted); // atomically update the cache Set<org.apache.parquet.column.Encoding> cached = cachedEncodingSets.putIfAbsent(converted, converted); if (cached == null) { // cached == null signifies that converted was *not* in the cache previously // so we can return converted instead of throwing it away, it has now // been cached cached = converted; } return cached; }
/**
 * fromFormatEncodings must canonicalize its result: semantically-equal input
 * lists (even distinct list instances) map to the same cached, unmodifiable
 * Set instance.
 */
@Test
public void testEncodingsCache() {
    ParquetMetadataConverter parquetMetadataConverter = new ParquetMetadataConverter();
    // Two distinct but element-wise equal input lists
    List<org.apache.parquet.format.Encoding> formatEncodingsCopy1 = Arrays.asList(
        org.apache.parquet.format.Encoding.BIT_PACKED,
        org.apache.parquet.format.Encoding.RLE_DICTIONARY,
        org.apache.parquet.format.Encoding.DELTA_LENGTH_BYTE_ARRAY);
    List<org.apache.parquet.format.Encoding> formatEncodingsCopy2 = Arrays.asList(
        org.apache.parquet.format.Encoding.BIT_PACKED,
        org.apache.parquet.format.Encoding.RLE_DICTIONARY,
        org.apache.parquet.format.Encoding.DELTA_LENGTH_BYTE_ARRAY);
    Set<org.apache.parquet.column.Encoding> expected = new HashSet<org.apache.parquet.column.Encoding>();
    expected.add(org.apache.parquet.column.Encoding.BIT_PACKED);
    expected.add(org.apache.parquet.column.Encoding.RLE_DICTIONARY);
    expected.add(org.apache.parquet.column.Encoding.DELTA_LENGTH_BYTE_ARRAY);
    Set<org.apache.parquet.column.Encoding> res1 = parquetMetadataConverter.fromFormatEncodings(formatEncodingsCopy1);
    Set<org.apache.parquet.column.Encoding> res2 = parquetMetadataConverter.fromFormatEncodings(formatEncodingsCopy1);
    Set<org.apache.parquet.column.Encoding> res3 = parquetMetadataConverter.fromFormatEncodings(formatEncodingsCopy2);
    // make sure they are all semantically equal
    assertEquals(expected, res1);
    assertEquals(expected, res2);
    assertEquals(expected, res3);
    // make sure res1, res2, and res3 are actually the same cached object
    assertSame(res1, res2);
    assertSame(res1, res3);
    // make sure they are all unmodifiable (UnmodifiableSet is not public, so we have to compare on class name)
    assertEquals("java.util.Collections$UnmodifiableSet", res1.getClass().getName());
    assertEquals("java.util.Collections$UnmodifiableSet", res2.getClass().getName());
    assertEquals("java.util.Collections$UnmodifiableSet", res3.getClass().getName());
}
@Udf(description = "Returns the hyperbolic cosine of an INT value")
public Double cosh(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the hyperbolic cosine of."
    ) final Integer value
) {
    // Propagate null unchanged; otherwise widen and reuse the double overload.
    if (value == null) {
        return cosh((Double) null);
    }
    return cosh(value.doubleValue());
}
/**
 * Exercises cosh for arguments greater than 2*pi across the double, int and
 * long overloads (cosh is unbounded and not periodic — the name only refers
 * to the input range being sampled).
 */
@Test
public void shouldHandleMoreThanPositive2Pi() {
    assertThat(udf.cosh(9.1), closeTo(4477.646407574158, 0.000000000000001));
    assertThat(udf.cosh(6.3), closeTo(272.286873215353, 0.000000000000001));
    // int and long overloads of the same argument must agree
    assertThat(udf.cosh(7), closeTo(548.317035155212, 0.000000000000001));
    assertThat(udf.cosh(7L), closeTo(548.317035155212, 0.000000000000001));
}
/**
 * SQL mod(bigint, bigint): remainder of num1 / num2.
 * Java's % keeps the sign of the dividend; division by zero raises an
 * ArithmeticException from the JVM.
 */
@Description("remainder of given quotient")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
public static long mod(@SqlType(StandardTypes.BIGINT) long num1, @SqlType(StandardTypes.BIGINT) long num2) {
    return num1 % num2;
}
/**
 * Exhaustively exercises mod across integer, bigint, double, real and decimal
 * operand combinations, including NULL propagation and division by zero.
 */
@Test
public void testMod() {
    // integer % integer
    for (int left : intLefts) {
        for (int right : intRights) {
            assertFunction("mod(" + left + ", " + right + ")", INTEGER, (left % right));
        }
    }
    // bigint % bigint (small values)
    for (int left : intLefts) {
        for (int right : intRights) {
            assertFunction("mod( BIGINT '" + left + "' , BIGINT '" + right + "')", BIGINT, (long) (left % right));
        }
    }
    // bigint % bigint (values beyond int range)
    for (long left : intLefts) {
        for (long right : intRights) {
            assertFunction("mod(" + left * 10000000000L + ", " + right * 10000000000L + ")", BIGINT, (left * 10000000000L) % (right * 10000000000L));
        }
    }
    // integer % double / real
    for (int left : intLefts) {
        for (double right : doubleRights) {
            assertFunction("mod(" + left + ", DOUBLE '" + right + "')", DOUBLE, left % right);
        }
    }
    for (int left : intLefts) {
        for (double right : doubleRights) {
            assertFunction("mod(" + left + ", REAL '" + (float) right + "')", REAL, left % (float) right);
        }
    }
    // double / real % integer
    for (double left : doubleLefts) {
        for (long right : intRights) {
            assertFunction("mod(DOUBLE '" + left + "', " + right + ")", DOUBLE, left % right);
        }
    }
    for (double left : doubleLefts) {
        for (long right : intRights) {
            assertFunction("mod(REAL '" + (float) left + "', " + right + ")", REAL, (float) left % right);
        }
    }
    // double % double and real % real
    for (double left : doubleLefts) {
        for (double right : doubleRights) {
            assertFunction("mod(DOUBLE '" + left + "', DOUBLE '" + right + "')", DOUBLE, left % right);
        }
    }
    for (double left : doubleLefts) {
        for (double right : doubleRights) {
            assertFunction("mod(REAL '" + (float) left + "', REAL '" + (float) right + "')", REAL, (float) left % (float) right);
        }
    }
    // NULL propagates through either operand
    assertFunction("mod(5.0E0, NULL)", DOUBLE, null);
    assertFunction("mod(NULL, 5.0E0)", DOUBLE, null);
    // decimal % decimal, including sign handling (result keeps the dividend's sign)
    assertFunction("mod(DECIMAL '0.0', DECIMAL '2.0')", createDecimalType(1, 1), SqlDecimal.of("0.0"));
    assertFunction("mod(DECIMAL '13.0', DECIMAL '5.0')", createDecimalType(2, 1), SqlDecimal.of("3.0"));
    assertFunction("mod(DECIMAL '-13.0', DECIMAL '5.0')", createDecimalType(2, 1), SqlDecimal.of("-3.0"));
    assertFunction("mod(DECIMAL '13.0', DECIMAL '-5.0')", createDecimalType(2, 1), SqlDecimal.of("3.0"));
    assertFunction("mod(DECIMAL '-13.0', DECIMAL '-5.0')", createDecimalType(2, 1), SqlDecimal.of("-3.0"));
    // mixed scales: result scale is the max of the operand scales
    assertFunction("mod(DECIMAL '5.0', DECIMAL '2.5')", createDecimalType(2, 1), SqlDecimal.of("0.0"));
    assertFunction("mod(DECIMAL '5.0', DECIMAL '2.05')", createDecimalType(3, 2), SqlDecimal.of("0.90"));
    assertFunction("mod(DECIMAL '5.0', DECIMAL '2.55')", createDecimalType(3, 2), SqlDecimal.of("2.45"));
    assertFunction("mod(DECIMAL '5.0001', DECIMAL '2.55')", createDecimalType(5, 4), SqlDecimal.of("2.4501"));
    // NOTE(review): the expected "0.01" below has scale 2 but the declared type
    // is decimal(18, 0), and 123456789012345670 mod 123456789012345669 is 1 —
    // this expectation looks wrong; confirm it should be SqlDecimal.of("1").
    assertFunction("mod(DECIMAL '123456789012345670', DECIMAL '123456789012345669')", createDecimalType(18, 0), SqlDecimal.of("0.01"));
    assertFunction("mod(DECIMAL '12345678901234567.90', DECIMAL '12345678901234567.89')", createDecimalType(19, 2), SqlDecimal.of("0.01"));
    // NULL decimals propagate
    assertFunction("mod(DECIMAL '5.0', CAST(NULL as DECIMAL(1,0)))", createDecimalType(2, 1), null);
    assertFunction("mod(CAST(NULL as DECIMAL(1,0)), DECIMAL '5.0')", createDecimalType(2, 1), null);
    // decimal modulo zero is an error
    assertInvalidFunction("mod(DECIMAL '5.0', DECIMAL '0')", DIVISION_BY_ZERO);
}
@Override
public Object collect() {
    // Current aggregated value; null until the first value is accumulated.
    return value;
}
@Test
public void test_default() {
    // A freshly constructed MAX aggregation has accumulated nothing, so its
    // collected value must be null.
    final MaxSqlAggregation freshAggregation = new MaxSqlAggregation();
    final Object collected = freshAggregation.collect();
    assertThat(collected).isNull();
}
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) { if ( point1 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null")); } if ( point2 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null")); } try { boolean result = point1.compareTo( point2 ) < 0; return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2")); } }
/**
 * before(range1, range2): true only when range1 ends before range2 starts;
 * touching endpoints count as "before" only when at least one of the touching
 * boundaries is open.
 */
@Test
void invokeParamRangeAndRange() {
    // identical ranges are not before each other
    FunctionTestUtil.assertResult( beforeFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.FALSE );
    // disjoint ranges: [a..f] is before [g..k]
    FunctionTestUtil.assertResult( beforeFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "g", "k", Range.RangeBoundary.CLOSED ) ),
        Boolean.TRUE );
    // both boundaries closed at the touching point "f": overlapping, not before
    FunctionTestUtil.assertResult( beforeFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ) ),
        Boolean.FALSE );
    // second range open at "f": no overlap, so before
    FunctionTestUtil.assertResult( beforeFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.OPEN, "f", "k", Range.RangeBoundary.CLOSED ) ),
        Boolean.TRUE );
    // first range open at "f": no overlap, so before
    FunctionTestUtil.assertResult( beforeFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ) ),
        Boolean.TRUE );
}
public RowSerializer(TypeSerializer<?>[] fieldSerializers) {
    // Convenience constructor: no field-name mapping (null), with the third
    // flag set to true — presumably a legacy/compatibility mode; confirm
    // against the primary constructor's documentation.
    this(fieldSerializers, null, true);
}
/**
 * Round-trips positioned, named, sparse-named and named-positioned rows
 * through the serializer created for a named ROW type.
 * NOTE(review): the three setField calls after creating sparseNamedRow target
 * namedRow, so sparseNamedRow is serialized with no fields at all — this looks
 * like a typo for sparseNamedRow.setField(...); confirm intent.
 */
@Test
void testRowSerializer() {
    final TypeInformation<Row> rowTypeInfo =
        Types.ROW_NAMED(
            new String[] {"a", "b", "c", "d"},
            Types.INT, Types.STRING, Types.DOUBLE, Types.BOOLEAN);

    // Row addressed purely by position
    final Row positionedRow = Row.withPositions(RowKind.UPDATE_BEFORE, 4);
    // setKind is redundant here: withPositions already applied the kind
    positionedRow.setKind(RowKind.UPDATE_BEFORE);
    positionedRow.setField(0, 1);
    positionedRow.setField(1, "a");
    positionedRow.setField(2, null);
    positionedRow.setField(3, false);

    // Row addressed purely by name
    final Row namedRow = Row.withNames(RowKind.UPDATE_BEFORE);
    namedRow.setField("a", 1);
    namedRow.setField("b", "a");
    namedRow.setField("c", null);
    namedRow.setField("d", false);

    // Named row with one field absent (see NOTE above — fields are set on namedRow)
    final Row sparseNamedRow = Row.withNames(RowKind.UPDATE_BEFORE);
    namedRow.setField("a", 1);
    namedRow.setField("b", "a");
    namedRow.setField("d", false); // "c" is missing

    // Row with an explicit name-to-position mapping
    final LinkedHashMap<String, Integer> positionByName = new LinkedHashMap<>();
    positionByName.put("a", 0);
    positionByName.put("b", 1);
    positionByName.put("c", 2);
    positionByName.put("d", 3);
    final Row namedPositionedRow =
        RowUtils.createRowWithNamedPositions(
            RowKind.UPDATE_BEFORE, new Object[4], positionByName);
    namedPositionedRow.setField("a", 1);
    namedPositionedRow.setField(1, "a");
    namedPositionedRow.setField(2, null);
    namedPositionedRow.setField("d", false);

    final TypeSerializer<Row> serializer = rowTypeInfo.createSerializer(new SerializerConfigImpl());
    final RowSerializerTestInstance instance =
        new RowSerializerTestInstance(
            serializer, positionedRow, namedRow, sparseNamedRow, namedPositionedRow);
    instance.testAll();
}
public static JavaBeanDescriptor serialize(Object obj) {
    // Default accessor strategy is field-level access.
    return serialize(obj, JavaBeanAccessor.FIELD);
}
/**
 * Serializes a bean holding date, enum, class, array, collection and map
 * properties via METHOD accessors, then checks each resulting descriptor's
 * kind, class name and nested contents.
 */
@Test
void testBeanSerialize() {
    // Populate one property of every supported shape
    Bean bean = new Bean();
    bean.setDate(new Date());
    bean.setStatus(PersonStatus.ENABLED);
    bean.setType(Bean.class);
    bean.setArray(new Phone[] {});
    Collection<Phone> collection = new ArrayList<Phone>();
    bean.setCollection(collection);
    Phone phone = new Phone();
    collection.add(phone);
    Map<String, FullAddress> map = new HashMap<String, FullAddress>();
    FullAddress address = new FullAddress();
    map.put("first", address);
    bean.setAddresses(map);

    JavaBeanDescriptor descriptor = JavaBeanSerializeUtil.serialize(bean, JavaBeanAccessor.METHOD);
    Assertions.assertTrue(descriptor.isBeanType());
    // Primitive-ish and enum properties round-trip by value
    assertEqualsPrimitive(bean.getDate(), descriptor.getProperty("date"));
    assertEqualsEnum(bean.getStatus(), descriptor.getProperty("status"));
    // Class property is recorded as a class-type descriptor carrying the name
    Assertions.assertTrue(((JavaBeanDescriptor) descriptor.getProperty("type")).isClassType());
    Assertions.assertEquals(
        Bean.class.getName(),
        ((JavaBeanDescriptor) descriptor.getProperty("type")).getClassNameProperty());
    // Empty array stays an empty array-type descriptor
    Assertions.assertTrue(((JavaBeanDescriptor) descriptor.getProperty("array")).isArrayType());
    Assertions.assertEquals(0, ((JavaBeanDescriptor) descriptor.getProperty("array")).propertySize());
    // Collection contains exactly the one (empty) Phone bean
    JavaBeanDescriptor property = (JavaBeanDescriptor) descriptor.getProperty("collection");
    Assertions.assertTrue(property.isCollectionType());
    Assertions.assertEquals(1, property.propertySize());
    property = (JavaBeanDescriptor) property.getProperty(0);
    Assertions.assertTrue(property.isBeanType());
    Assertions.assertEquals(Phone.class.getName(), property.getClassName());
    Assertions.assertEquals(0, property.propertySize());
    // Map entry: primitive key "first" mapped to an (empty) FullAddress bean
    property = (JavaBeanDescriptor) descriptor.getProperty("addresses");
    Assertions.assertTrue(property.isMapType());
    Assertions.assertEquals(bean.getAddresses().getClass().getName(), property.getClassName());
    Assertions.assertEquals(1, property.propertySize());
    Map.Entry<Object, Object> entry = property.iterator().next();
    Assertions.assertTrue(((JavaBeanDescriptor) entry.getKey()).isPrimitiveType());
    Assertions.assertEquals("first", ((JavaBeanDescriptor) entry.getKey()).getPrimitiveProperty());
    Assertions.assertTrue(((JavaBeanDescriptor) entry.getValue()).isBeanType());
    Assertions.assertEquals(FullAddress.class.getName(), ((JavaBeanDescriptor) entry.getValue()).getClassName());
    Assertions.assertEquals(0, ((JavaBeanDescriptor) entry.getValue()).propertySize());
}
public int getErrCode() {
    // Numeric error code carried by this exception (see NacosException constants).
    return errCode;
}
@Test
void testConstructorWithErrorCode() {
    // The code-only constructor populates the error code and nothing else.
    final NacosRuntimeException exception = new NacosRuntimeException(NacosException.INVALID_PARAM);

    assertEquals(NacosException.INVALID_PARAM, exception.getErrCode());
    // Message and cause remain unset
    assertNull(exception.getMessage());
    assertNull(exception.getCause());
}
public CeQueueDto setSubmitterUuid(@Nullable String s) {
    // The column holds at most 255 characters; null is an accepted value.
    final boolean fitsColumn = s == null || s.length() <= 255;
    checkArgument(fitsColumn, "Value of submitter uuid is too long: %s", s);
    this.submitterUuid = s;
    return this;
}
@Test
void setSubmitterLogin_accepts_null_empty_and_string_255_chars_or_less() {
    // All values up to and including 255 characters (plus null/empty) are accepted.
    final String[] acceptedValues = {null, "", "bar", STR_255_CHARS};
    for (String acceptedValue : acceptedValues) {
        assertThatNoException().isThrownBy(() -> underTest.setSubmitterUuid(acceptedValue));
    }
}
/**
 * Coerces the left/right typed expressions of a comparison into compatible
 * types. Returns the (possibly rewritten) pair; throws
 * CoercedExpressionException when the two raw classes cannot be coerced.
 * The right side is flagged "as static field" when coerced to a date/time
 * constant.
 */
public CoercedExpressionResult coerce() {
    final Class<?> leftClass = left.getRawClass();
    final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
    final Class<?> rightClass = right.getRawClass();
    final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);
    boolean sameClass = leftClass == rightClass;
    boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;
    // Identical types, or unification expressions, need no coercion.
    if (sameClass || isUnificationExpression) {
        return new CoercedExpressionResult(left, right);
    }
    if (!canCoerce()) {
        throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
    }
    // int/long compared to Double: widen the LEFT side to double via a cast.
    if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
        CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
        return new CoercedExpressionResult(
            new TypedExpression(castExpression, double.class, left.getType()),
            right,
            false);
    }
    final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
    final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);

    boolean rightAsStaticField = false;
    final Expression rightExpression = right.getExpression();
    final TypedExpression coercedRight;
    // Cascade of right-hand coercions — order matters; first match wins.
    if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
        // numeric literal on the right: rewrite the literal to the left type
        final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
        coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
        coercedRight.setType( leftClass );
    } else if (shouldCoerceBToString(left, right)) {
        coercedRight = coerceToString(right);
    } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
        coercedRight = castToClass(leftClass);
    } else if (leftClass == long.class && rightClass == int.class) {
        coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
    } else if (leftClass == Date.class && rightClass == String.class) {
        // date/time string literals become static constant fields
        coercedRight = coerceToDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDate.class && rightClass == String.class) {
        coercedRight = coerceToLocalDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDateTime.class && rightClass == String.class) {
        coercedRight = coerceToLocalDateTime(right);
        rightAsStaticField = true;
    } else if (shouldCoerceBToMap()) {
        coercedRight = castToClass(toNonPrimitiveType(leftClass));
    } else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
        coercedRight = coerceBoolean(right);
    } else {
        coercedRight = right;
    }
    // The left side is only rewritten for Character-vs-String comparisons.
    final TypedExpression coercedLeft;
    if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
        coercedLeft = coerceToString(left);
    } else {
        coercedLeft = left;
    }
    return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}
/**
 * Coercing an int left operand against a long-typed binary expression
 * ($one << $shift) must NOT insert a cast: the right side stays untouched.
 */
@Test
public void doNotCast() {
    final TypedExpression left = expr(THIS_PLACEHOLDER + ".intValue()", int.class);
    final TypedExpression right = expr("$one << $shift", long.class);
    final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce();
    assertThat(coerce.getCoercedRight()).isEqualTo(expr("$one << $shift", long.class));
}
public static <K, V> Write<K, V> write() {
    // Entry point: returns an unconfigured Write transform to be customized
    // through its with* methods before expansion.
    return new AutoValue_CdapIO_Write.Builder<K, V>().build();
}
@Test
public void testWriteObjectCreationFailsIfCdapPluginClassIsNotSupported() {
    // An unsupported plugin class must be rejected when configuring the transform.
    final CdapIO.Write<String, String> write = CdapIO.write();
    assertThrows(
        UnsupportedOperationException.class,
        () -> write.withCdapPluginClass(EmployeeBatchSink.class));
}
/**
 * Creates the next span from extracted propagation state: a child when a full
 * context was extracted; a new span on the propagated trace when only a trace
 * id context was extracted; otherwise a span that inherits the in-progress
 * parent's sampling decision (or the extracted sampling flags when no parent).
 *
 * @throws NullPointerException if extracted is null
 */
public Span nextSpan(TraceContextOrSamplingFlags extracted) {
    if (extracted == null) throw new NullPointerException("extracted == null");
    TraceContext context = extracted.context();
    if (context != null) return newChild(context);

    TraceIdContext traceIdContext = extracted.traceIdContext();
    if (traceIdContext != null) {
        // Only a trace id was propagated (no span id): keep the trace id, mint a new span.
        return _toSpan(null, decorateContext(
            InternalPropagation.instance.flags(extracted.traceIdContext()),
            traceIdContext.traceIdHigh(),
            traceIdContext.traceId(),
            0L,
            0L,
            0L,
            extracted.extra()
        ));
    }

    SamplingFlags samplingFlags = extracted.samplingFlags();
    List<Object> extra = extracted.extra();

    TraceContext parent = currentTraceContext.get();
    int flags;
    long traceIdHigh = 0L, traceId = 0L, localRootId = 0L, spanId = 0L;
    if (parent != null) {
        // At this point, we didn't extract trace IDs, but do have a trace in progress. Since typical
        // trace sampling is up front, we retain the decision from the parent.
        flags = InternalPropagation.instance.flags(parent);
        traceIdHigh = parent.traceIdHigh();
        traceId = parent.traceId();
        localRootId = parent.localRootId();
        spanId = parent.spanId();
        extra = concat(extra, parent.extra());
    } else {
        flags = InternalPropagation.instance.flags(samplingFlags);
    }
    return _toSpan(parent, decorateContext(flags, traceIdHigh, traceId, localRootId, spanId, 0L, extra));
}
@Test
void nextSpan_usesSampler() {
    // A plain nextSpan() starts a root span: no parent id.
    assertThat(tracer.nextSpan().context().parentId()).isNull();
    // With a never-sampling sampler function the resulting context is unsampled.
    assertThat(tracer.nextSpan(neverSample(), false).context().sampled()).isFalse();
}
/**
 * Executes a single CLI line non-interactively, returning NO_ERROR on success
 * or ERROR when execution throws. Connectivity to the REST server is validated
 * before the line runs; output is always flushed.
 */
public int runCommand(final String command) {
    int errorCode = NO_ERROR;
    RemoteServerSpecificCommand.validateClient(terminal.writer(), restClient);
    try {
        // Commands executed by the '-e' parameter do not need to execute specific CLI
        // commands. For RUN SCRIPT commands, users can use the '-f' command parameter.
        handleLine(command);
    } catch (final EndOfFileException exception) {
        // Ignore - only used by runInteractively() to exit the CLI
    } catch (final Exception exception) {
        errorCode = ERROR;
        LOGGER.error("An error occurred while running a command. Error = "
            + exception.getMessage(), exception);
        terminal.printError(ErrorMessageUtil.buildErrorMessage(exception),
            exception.toString());
    }
    terminal.flush();
    return errorCode;
}
@Test
public void shouldPrintErrorIfCantConnectToRestServerOnRunCommand() throws Exception {
    givenRunInteractivelyWillExit();

    // REST client that fails its server-info probe with a connection error
    final KsqlRestClient mockRestClient = givenMockRestClient();
    when(mockRestClient.getServerInfo())
        .thenThrow(new KsqlRestClientException("Boom", new IOException("")));

    final Cli cli = new Cli(1L, 1L, mockRestClient, console);
    final int errorCode = cli.runCommand("this is a command");

    assertThat(errorCode, is(-1));
    assertThat(terminal.getOutputString(),
        containsString("Please ensure that the URL provided is for an active KSQL server."));
}
/**
 * Attribute lookup backed by a cached parent-directory listing; on a cache
 * miss the delegate is consulted and the listings it produces are cached for
 * subsequent lookups.
 *
 * @throws NotfoundException when a valid cached listing does not contain the file
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        // Root cannot be found via a parent listing; always delegate.
        return delegate.find(file, listener);
    }
    if(cache.isValid(file.getParent())) {
        final AttributedList<Path> list = cache.get(file.getParent());
        final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(sensitivity, file));
        if(null != found) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Return cached attributes %s for %s", found.attributes(), file));
            }
            return found.attributes();
        }
        if(log.isDebugEnabled()) {
            log.debug(String.format("Cached directory listing does not contain %s", file));
        }
        // A valid cached listing without the file is authoritative: not found.
        throw new NotfoundException(file.getAbsolute());
    }
    // Cache miss: delegate and record any listings observed along the way.
    final CachingListProgressListener caching = new CachingListProgressListener(cache);
    try {
        final PathAttributes attr = delegate.find(file, new ProxyListProgressListener(listener, caching));
        caching.cache();
        return attr;
    }
    catch(NotfoundException e) {
        // Cache what was listed even on failure to avoid repeated lookups.
        caching.cache();
        throw e;
    }
}
/**
 * The first find() populates the cache via the delegate's listing; subsequent
 * calls are served from the cache (returning the cached Path's attributes, a
 * distinct-but-equal instance).
 */
@Test
public void testFindWithDefaultExist() throws Exception {
    final PathCache cache = new PathCache(1);
    final Path directory = new Path("/", EnumSet.of(Path.Type.directory));
    final Path file = new Path(directory, "f", EnumSet.of(Path.Type.file));
    // Delegate session lists a single file "f" under the requested directory
    final CachingAttributesFinderFeature feature = new CachingAttributesFinderFeature(Protocol.Case.sensitive, cache,
        new DefaultAttributesFinderFeature(new NullSession(new Host(new TestProtocol())) {
            @Override
            public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws ConnectionCanceledException {
                final Path f = new Path(directory, "f", EnumSet.of(Path.Type.file));
                listener.chunk(directory, new AttributedList<>(Collections.singletonList(f)));
                return new AttributedList<>(Collections.singletonList(f));
            }
        }));
    assertNotNull(feature.find(file, new DisabledListProgressListener()));
    // Cached hit returns equal attributes but not the same instance
    assertNotSame(file.attributes(), feature.find(file, new DisabledListProgressListener()));
    assertEquals(file.attributes(), feature.find(file, new DisabledListProgressListener()));
    // The parent listing was cached and contains the file
    assertEquals(1, cache.size());
    assertTrue(cache.isCached(directory));
    assertTrue(cache.get(directory).contains(file));
}
/**
 * Streams one change-stream partition: acquires/verifies the partition lock,
 * handles a pending CloseStream (end-of-time, unexpected status, or
 * split/merge into child partitions), advances the stored watermark, then
 * consumes change-stream records until a checkpoint is requested.
 *
 * @return stop() when the partition terminates or the lock cannot be held;
 *         resume() (or the action's checkpoint continuation) otherwise
 * @throws IOException propagated from the underlying stream
 */
public ProcessContinuation run(
    PartitionRecord partitionRecord,
    RestrictionTracker<StreamProgress, StreamProgress> tracker,
    OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator)
    throws IOException {
  BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator =
      new BytesThroughputEstimator<>(sizeEstimator, Instant.now());
  // Lock the partition
  if (tracker.currentRestriction().isEmpty()) {
    boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord);
    // Clean up NewPartition on the first run regardless of locking result. If locking fails it
    // means this partition is being streamed, then cleaning up NewPartitions avoids lingering
    // NewPartitions.
    for (NewPartition newPartition : partitionRecord.getParentPartitions()) {
      metadataTableDao.deleteNewPartition(newPartition);
    }
    if (!lockedPartition) {
      LOG.info(
          "RCSP {} : Could not acquire lock with uid: {}, because this is a "
              + "duplicate and another worker is working on this partition already.",
          formatByteStringRange(partitionRecord.getPartition()),
          partitionRecord.getUuid());
      StreamProgress streamProgress = new StreamProgress();
      streamProgress.setFailToLock(true);
      metrics.decPartitionStreamCount();
      tracker.tryClaim(streamProgress);
      return ProcessContinuation.stop();
    }
  } else if (tracker.currentRestriction().getCloseStream() == null
      && !metadataTableDao.doHoldLock(
          partitionRecord.getPartition(), partitionRecord.getUuid())) {
    // We only verify the lock if we are not holding CloseStream because if this is a retry of
    // CloseStream we might have already cleaned up the lock in a previous attempt.
    // Failed correctness check on this worker holds the lock on this partition. This shouldn't
    // fail because there's a restriction tracker which means this worker has already acquired the
    // lock and once it has acquired the lock it shouldn't fail the lock check.
    LOG.warn(
        "RCSP {} : Subsequent run that doesn't hold the lock {}. This is not unexpected and "
            + "should probably be reviewed.",
        formatByteStringRange(partitionRecord.getPartition()),
        partitionRecord.getUuid());
    StreamProgress streamProgress = new StreamProgress();
    streamProgress.setFailToLock(true);
    metrics.decPartitionStreamCount();
    tracker.tryClaim(streamProgress);
    return ProcessContinuation.stop();
  }
  // Process CloseStream if it exists
  CloseStream closeStream = tracker.currentRestriction().getCloseStream();
  if (closeStream != null) {
    LOG.debug("RCSP: Processing CloseStream");
    metrics.decPartitionStreamCount();
    if (closeStream.getStatus().getCode() == Status.Code.OK) {
      // We need to update watermark here. We're terminating this stream because we have reached
      // endTime. Instant.now is greater or equal to endTime. The goal here is
      // DNP will need to know this stream has passed the endTime so DNP can eventually terminate.
      Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE);
      Instant endTime = partitionRecord.getEndTime();
      if (endTime != null) {
        terminatingWatermark = endTime;
      }
      watermarkEstimator.setWatermark(terminatingWatermark);
      metadataTableDao.updateWatermark(
          partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null);
      LOG.info(
          "RCSP {}: Reached end time, terminating...",
          formatByteStringRange(partitionRecord.getPartition()));
      return ProcessContinuation.stop();
    }
    if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) {
      // Only OUT_OF_RANGE signals a split/merge; anything else is unexpected.
      LOG.error(
          "RCSP {}: Reached unexpected terminal state: {}",
          formatByteStringRange(partitionRecord.getPartition()),
          closeStream.getStatus());
      return ProcessContinuation.stop();
    }
    // Release the lock only if the uuid matches. In normal operation this doesn't change
    // anything. However, it's possible for this RCSP to crash while processing CloseStream but
    // after the side effects of writing the new partitions to the metadata table. New partitions
    // can be created while this RCSP restarts from the previous checkpoint and processes the
    // CloseStream again. In certain race scenarios the child partitions may merge back to this
    // partition, but as a new RCSP. The new partition (same as this partition) would write the
    // exact same content to the metadata table but with a different uuid. We don't want to
    // accidentally delete the StreamPartition because it now belongs to the new RCSP.
    // If the uuid is the same (meaning this race scenario did not take place) we release the lock
    // and mark the StreamPartition to be deleted, so we can delete it after we have written the
    // NewPartitions.
    metadataTableDao.releaseStreamPartitionLockForDeletion(
        partitionRecord.getPartition(), partitionRecord.getUuid());
    // The partitions in the continuation tokens must cover the same key space as this partition.
    // If there's only 1 token, then the token's partition is equals to this partition.
    // If there are more than 1 tokens, then the tokens form a continuous row range equals to this
    // partition.
    List<ByteStringRange> childPartitions = new ArrayList<>();
    List<ByteStringRange> tokenPartitions = new ArrayList<>();
    // Check if NewPartitions field exists, if not we default to using just the
    // ChangeStreamContinuationTokens.
    boolean useNewPartitionsField =
        closeStream.getNewPartitions().size()
            == closeStream.getChangeStreamContinuationTokens().size();
    for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) {
      ByteStringRange childPartition;
      if (useNewPartitionsField) {
        childPartition = closeStream.getNewPartitions().get(i);
      } else {
        childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition();
      }
      childPartitions.add(childPartition);
      ChangeStreamContinuationToken token =
          getTokenWithCorrectPartition(
              partitionRecord.getPartition(),
              closeStream.getChangeStreamContinuationTokens().get(i));
      tokenPartitions.add(token.getPartition());
      metadataTableDao.writeNewPartition(
          new NewPartition(
              childPartition, Collections.singletonList(token), watermarkEstimator.getState()));
    }
    LOG.info(
        "RCSP {}: Split/Merge into {}",
        formatByteStringRange(partitionRecord.getPartition()),
        partitionsToString(childPartitions));
    if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) {
      LOG.warn(
          "RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace",
          formatByteStringRange(partitionRecord.getPartition()),
          partitionsToString(tokenPartitions));
    }
    // Perform the real cleanup. This step is no op if the race mentioned above occurs (splits and
    // merges results back to this partition again) because when we register the "new" partition,
    // we unset the deletion bit.
    metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition());
    return ProcessContinuation.stop();
  }
  // Update the metadata table with the watermark
  metadataTableDao.updateWatermark(
      partitionRecord.getPartition(),
      watermarkEstimator.getState(),
      tracker.currentRestriction().getCurrentToken());
  // Start to stream the partition.
  ServerStream<ChangeStreamRecord> stream = null;
  try {
    stream =
        changeStreamDao.readChangeStreamPartition(
            partitionRecord,
            tracker.currentRestriction(),
            partitionRecord.getEndTime(),
            heartbeatDuration);
    for (ChangeStreamRecord record : stream) {
      Optional<ProcessContinuation> result =
          changeStreamAction.run(
              partitionRecord, record, tracker, receiver, watermarkEstimator, throughputEstimator);
      // changeStreamAction will usually return Optional.empty() except for when a checkpoint
      // (either runner or pipeline initiated) is required.
      if (result.isPresent()) {
        return result.get();
      }
    }
  } catch (Exception e) {
    throw e;
  } finally {
    // Always cancel the server stream so the connection is released.
    if (stream != null) {
      stream.cancel();
    }
  }
  return ProcessContinuation.resume();
}
@Test
public void testLockingRowFailsStops() throws IOException {
    // Simulate another worker owning the partition: both the initial lock attempt
    // and the follow-up "do we already hold it" check report failure.
    when(metadataTableDao.lockAndRecordPartition(partitionRecord)).thenReturn(false);
    when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(false);
    final DoFn.ProcessContinuation result =
        action.run(partitionRecord, tracker, receiver, watermarkEstimator);
    // The action must stop rather than process a partition it does not own.
    assertEquals(DoFn.ProcessContinuation.stop(), result);
    // On failure to lock, we try to claim a fail to lock, so it will terminate gracefully.
    StreamProgress streamProgress = new StreamProgress();
    streamProgress.setFailToLock(true);
    verify(tracker).tryClaim(streamProgress);
    // No change-stream records may be processed when the lock was not acquired.
    verify(changeStreamAction, never()).run(any(), any(), any(), any(), any(), any());
}
/**
 * Creates a coder for a windowed value: the value itself plus its windows,
 * timestamp and pane information.
 *
 * @param valueCoder coder for the wrapped value
 * @param windowCoder coder for the windows the value is assigned to
 * @return a {@link FullWindowedValueCoder} combining both coders
 */
public static <T> FullWindowedValueCoder<T> getFullCoder(
    Coder<T> valueCoder, Coder<? extends BoundedWindow> windowCoder) {
  final FullWindowedValueCoder<T> fullCoder = FullWindowedValueCoder.of(valueCoder, windowCoder);
  return fullCoder;
}
@Test
public void testFullWindowedValueCoderIsSerializableWithWellKnownCoderType() {
    // A full windowed-value coder built from well-known coders must survive
    // Java serialization (required for shipping coders to workers).
    CoderProperties.coderSerializable(
        WindowedValue.getFullCoder(GlobalWindow.Coder.INSTANCE, GlobalWindow.Coder.INSTANCE));
}
/**
 * Collects table references from every clause of a SELECT statement.
 * For a combined (e.g. UNION) statement, tables come from the two combined
 * sub-selects; otherwise they come from the FROM clause and projections.
 * WHERE/GROUP BY/ORDER BY/HAVING/WITH/LOCK clauses are always scanned.
 *
 * @param selectStatement select statement to scan
 */
public void extractTablesFromSelect(final SelectStatement selectStatement) {
    if (selectStatement.getCombine().isPresent()) {
        // Combined statement: recurse into both operands of the combine segment.
        CombineSegment combine = selectStatement.getCombine().get();
        extractTablesFromSelect(combine.getLeft().getSelect());
        extractTablesFromSelect(combine.getRight().getSelect());
    } else {
        // Plain statement: FROM clause and projections are scanned directly.
        selectStatement.getFrom().ifPresent(this::extractTablesFromTableSegment);
        if (null != selectStatement.getProjections()) {
            extractTablesFromProjections(selectStatement.getProjections());
        }
    }
    selectStatement.getWhere().ifPresent(optional -> extractTablesFromExpression(optional.getExpr()));
    selectStatement.getGroupBy().ifPresent(optional -> extractTablesFromOrderByItems(optional.getGroupByItems()));
    selectStatement.getOrderBy().ifPresent(optional -> extractTablesFromOrderByItems(optional.getOrderByItems()));
    selectStatement.getHaving().ifPresent(optional -> extractTablesFromExpression(optional.getExpr()));
    selectStatement.getWithSegment().ifPresent(optional -> extractTablesFromCTEs(optional.getCommonTableExpressions()));
    selectStatement.getLock().ifPresent(this::extractTablesFromLock);
}
@Test
void assertExtractTablesFromCombineWithSubQueryProjection() {
    // Build a UNION of two sub-query selects; the extractor must pick up the
    // table from each side of the combine, not from the outer statement.
    SelectStatement selectStatement = createSelectStatementWithSubQueryProjection("t_order");
    SubquerySegment left = new SubquerySegment(0, 0, createSelectStatementWithSubQueryProjection("t_order"), "");
    SubquerySegment right = new SubquerySegment(0, 0, createSelectStatementWithSubQueryProjection("t_order_item"), "");
    when(selectStatement.getCombine()).thenReturn(Optional.of(new CombineSegment(0, 0, left, CombineType.UNION, right)));
    tableExtractor.extractTablesFromSelect(selectStatement);
    Collection<SimpleTableSegment> actual = tableExtractor.getRewriteTables();
    assertThat(actual.size(), is(2));
    Iterator<SimpleTableSegment> iterator = actual.iterator();
    assertTableSegment(iterator.next(), 0, 0, "t_order");
    assertTableSegment(iterator.next(), 0, 0, "t_order_item");
}
/**
 * Logs a CANVASS_POSITION cluster event into the event ring buffer.
 * Uses a try-claim/commit protocol: if no space can be claimed the event is
 * silently dropped (index <= 0), which is the intended best-effort behavior.
 *
 * @param memberId           id of this cluster member
 * @param logLeadershipTermId leadership term id of the log position
 * @param logPosition        position within the log
 * @param leadershipTermId   current leadership term id
 * @param followerMemberId   id of the follower member
 * @param protocolVersion    semantic protocol version
 */
public void logOnCanvassPosition(
    final int memberId,
    final long logLeadershipTermId,
    final long logPosition,
    final long leadershipTermId,
    final int followerMemberId,
    final int protocolVersion) {
    final int length = canvassPositionLength();
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    // Claim space in the ring buffer; a non-positive index means no space.
    final int index = ringBuffer.tryClaim(CANVASS_POSITION.toEventCodeId(), encodedLength);
    if (index > 0) {
        try {
            encodeOnCanvassPosition(
                (UnsafeBuffer)ringBuffer.buffer(),
                index,
                captureLength,
                length,
                memberId,
                logLeadershipTermId,
                logPosition,
                leadershipTermId,
                followerMemberId,
                protocolVersion);
        } finally {
            // Commit must always follow a successful claim, even if encoding throws.
            ringBuffer.commit(index);
        }
    }
}
@Test
void logOnCanvassPosition() {
    final long logLeadershipTermId = 96;
    final long logPosition = 128L;
    final long leadershipTermId = 54;
    final int followerMemberId = 15;
    final int protocolVersion = SemanticVersion.compose(1, 9, 9);
    final int memberId = 222;
    final int offset = 64;
    // Pre-position the ring buffer tail so the event is encoded at `offset`.
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);
    logger.logOnCanvassPosition(
        memberId, logLeadershipTermId, logPosition, leadershipTermId, followerMemberId, protocolVersion);
    verifyLogHeader(
        logBuffer, offset, CANVASS_POSITION.toEventCodeId(), canvassPositionLength(), canvassPositionLength());
    // Verify each field at its expected offset within the encoded message body.
    final int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
    assertEquals(logLeadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
    assertEquals(logPosition, logBuffer.getLong(index + SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(leadershipTermId, logBuffer.getLong(index + 2 * SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(followerMemberId, logBuffer.getInt(index + 3 * SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(protocolVersion, logBuffer.getInt(index + 3 * SIZE_OF_LONG + SIZE_OF_INT, LITTLE_ENDIAN));
    assertEquals(memberId, logBuffer.getInt(index + 3 * SIZE_OF_LONG + 2 * SIZE_OF_INT, LITTLE_ENDIAN));
    // The dissector must render the same event as human-readable text.
    final StringBuilder sb = new StringBuilder();
    ClusterEventDissector.dissectCanvassPosition(
        CANVASS_POSITION, logBuffer, encodedMsgOffset(offset), sb);
    final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: CANVASS_POSITION " +
        "\\[36/36]: memberId=222 logLeadershipTermId=96 logPosition=128 leadershipTermId=54 followerMemberId=15 " +
        "protocolVersion=1.9.9";
    assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
/**
 * Exact-match stream rule: the message field's trimmed string value must equal
 * the rule's value. {@code rule.getInverted()} flips the result; a missing
 * field matches only when the rule is inverted.
 *
 * @param msg  message to test
 * @param rule rule holding the field name, expected value and inversion flag
 * @return {@code true} if the message matches the (possibly inverted) rule
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    // Look the field up once. The original called getField() twice, which is
    // redundant work and can observe two different values if the message is
    // mutated concurrently between the null check and the comparison.
    final Object field = msg.getField(rule.getField());
    if (field == null) {
        // Missing field: an exact match is impossible, so only inverted rules match.
        return rule.getInverted();
    }
    // XOR flips the comparison result when the rule is inverted.
    return rule.getInverted() ^ field.toString().trim().equals(rule.getValue());
}
@Test
public void testInvertedNullFieldShouldMatch() {
    // A field explicitly set to null counts as "missing"; with an inverted rule
    // the matcher must therefore report a match.
    final String fieldName = "nullfield";
    final StreamRule rule = getSampleRule();
    rule.setField(fieldName);
    rule.setInverted(true);
    final Message msg = getSampleMessage();
    msg.addField(fieldName, null);
    final StreamRuleMatcher matcher = getMatcher(rule);
    assertTrue(matcher.match(msg, rule));
}
/**
 * Drains complete packets from the source buffer and dispatches each one.
 * The buffer is flipped for reading first; on exit it is compacted (or
 * cleared) so any partial packet bytes are preserved for the next read.
 *
 * @return {@code CLEAN} — all readable data was consumed or retained
 * @throws Exception if packet reading or dispatch fails
 */
@Override
public HandlerStatus onRead() throws Exception {
    src.flip();
    try {
        while (src.hasRemaining()) {
            Packet packet = packetReader.readFrom(src);
            if (packet == null) {
                // Not enough bytes for a full packet yet; wait for more data.
                break;
            }
            onPacketComplete(packet);
        }
        return CLEAN;
    } finally {
        // Always restore the buffer to write mode, keeping unread bytes.
        compactOrClear(src);
    }
}
@Test
public void whenMultiplePackets() throws Exception {
    // Write four packets into one buffer; the decoder must extract all of them
    // in a single onRead() call and route urgent packets to the priority path.
    ByteBuffer src = ByteBuffer.allocate(1000);
    Packet packet1 = new Packet(serializationService.toBytes("packet1"));
    new PacketIOHelper().writeTo(packet1, src);
    Packet packet2 = new Packet(serializationService.toBytes("packet2"));
    new PacketIOHelper().writeTo(packet2, src);
    Packet packet3 = new Packet(serializationService.toBytes("packet3"));
    new PacketIOHelper().writeTo(packet3, src);
    Packet packet4 = new Packet(serializationService.toBytes("packet4"));
    packet4.raiseFlags(Packet.FLAG_URGENT);
    new PacketIOHelper().writeTo(packet4, src);
    decoder.src(src);
    decoder.onRead();
    assertEquals(asList(packet1, packet2, packet3, packet4), dispatcher.packets);
    // Three normal packets and one urgent packet must be counted separately.
    assertEquals(3, normalPacketCounter.get());
    assertEquals(1, priorityPacketCounter.get());
}
/**
 * Tears down the detector: cancels any in-flight dictionary generation and
 * terminates the loading-state stream after emitting NOT_LOADED.
 * The order matters: dispose first so no further state is emitted, then
 * publish the terminal state and complete the subject.
 */
public void destroy() {
    mGeneratingDisposable.dispose();
    mGenerateStateSubject.onNext(LoadingState.NOT_LOADED);
    mGenerateStateSubject.onComplete();
}
@Test
@Ignore("I'm not sure how this is two dictionaries")
public void testCalculatesCornersInBackgroundWithTwoDictionariesButDisposed() {
    // NOTE(review): test is @Ignore'd — it asserts the state stays LOADING even
    // after destroy() because the observer was disposed before completion.
    TestRxSchedulers.backgroundRunOneJob();
    mSubscribeState.dispose();
    Assert.assertEquals(GestureTypingDetector.LoadingState.LOADING, mCurrentState.get());
    TestRxSchedulers.backgroundRunOneJob();
    Assert.assertEquals(GestureTypingDetector.LoadingState.LOADING, mCurrentState.get());
    mDetectorUnderTest.destroy();
    TestRxSchedulers.drainAllTasks();
    // Disposed subscriber never observes the NOT_LOADED emitted by destroy().
    Assert.assertEquals(GestureTypingDetector.LoadingState.LOADING, mCurrentState.get());
}
/**
 * Tests whether a host/port matches an IP pattern. Supported pattern forms:
 * exact address, wildcard segments ("192.168.1.*"), segment ranges
 * ("192.168.1.1-65"), an optional ":port" suffix, the universal wildcards
 * "*" / "*.*.*.*", and both IPv4 and IPv6 addresses (split on "." vs ":").
 *
 * @param pattern the pattern to match against (trimmed before use)
 * @param host    host name or address; resolved via DNS to a literal address
 * @param port    port to check when the pattern carries a port suffix
 * @return {@code true} if the resolved host (and port, if constrained) matches
 * @throws UnknownHostException if host or a non-wildcard pattern cannot resolve
 * @throws IllegalArgumentException if pattern/host is null or malformed
 */
public static boolean matchIpRange(String pattern, String host, int port) throws UnknownHostException {
    if (pattern == null || host == null) {
        throw new IllegalArgumentException(
                "Illegal Argument pattern or hostName. Pattern:" + pattern + ", Host:" + host);
    }
    pattern = pattern.trim();
    // Universal wildcards match everything regardless of port.
    if ("*.*.*.*".equals(pattern) || "*".equals(pattern)) {
        return true;
    }
    InetAddress inetAddress = InetAddress.getByName(host);
    boolean isIpv4 = isValidV4Address(inetAddress);
    // hostAndPort[0] = address part of the pattern, hostAndPort[1] = optional port.
    String[] hostAndPort = getPatternHostAndPort(pattern, isIpv4);
    if (hostAndPort[1] != null && !hostAndPort[1].equals(String.valueOf(port))) {
        return false;
    }
    pattern = hostAndPort[0];
    String splitCharacter = SPLIT_IPV4_CHARACTER;
    if (!isIpv4) {
        splitCharacter = SPLIT_IPV6_CHARACTER;
    }
    String[] mask = pattern.split(splitCharacter);
    // check format of pattern
    checkHostPattern(pattern, mask, isIpv4);
    host = inetAddress.getHostAddress();
    if (pattern.equals(host)) {
        return true;
    }
    // short name condition
    if (!ipPatternContainExpression(pattern)) {
        // No wildcard/range in the pattern: resolve it and compare literal addresses.
        InetAddress patternAddress = InetAddress.getByName(pattern);
        return patternAddress.getHostAddress().equals(host);
    }
    // Compare the address segment by segment against the pattern mask.
    String[] ipAddress = host.split(splitCharacter);
    for (int i = 0; i < mask.length; i++) {
        if ("*".equals(mask[i]) || mask[i].equals(ipAddress[i])) {
            continue;
        } else if (mask[i].contains("-")) {
            // Range segment such as "1-65": the address segment must fall inside it.
            String[] rangeNumStrs = StringUtils.split(mask[i], '-');
            if (rangeNumStrs.length != 2) {
                throw new IllegalArgumentException("There is wrong format of ip Address: " + mask[i]);
            }
            Integer min = getNumOfIpSegment(rangeNumStrs[0], isIpv4);
            Integer max = getNumOfIpSegment(rangeNumStrs[1], isIpv4);
            Integer ip = getNumOfIpSegment(ipAddress[i], isIpv4);
            if (ip < min || ip > max) {
                return false;
            }
        } else if ("0".equals(ipAddress[i])
                && ("0".equals(mask[i]) || "00".equals(mask[i]) || "000".equals(mask[i]) || "0000".equals(mask[i]))) {
            // Zero segments may be written with leading zeros in the pattern.
            continue;
        } else if (!mask[i].equals(ipAddress[i])) {
            return false;
        }
    }
    return true;
}
@Test
void testMatchIpRangeMatchWhenIpv4() throws UnknownHostException {
    // Wildcards, exact match and in-range segments match; out-of-range and
    // different literal addresses do not.
    assertTrue(NetUtils.matchIpRange("*.*.*.*", "192.168.1.63", 90));
    assertTrue(NetUtils.matchIpRange("192.168.1.*", "192.168.1.63", 90));
    assertTrue(NetUtils.matchIpRange("192.168.1.63", "192.168.1.63", 90));
    assertTrue(NetUtils.matchIpRange("192.168.1.1-65", "192.168.1.63", 90));
    assertFalse(NetUtils.matchIpRange("192.168.1.1-61", "192.168.1.63", 90));
    assertFalse(NetUtils.matchIpRange("192.168.1.62", "192.168.1.63", 90));
}
/**
 * Splits a string into tokens using shell-like rules: whitespace separates
 * tokens, single or double quotes group characters (including whitespace)
 * into one token, and a backslash escapes the following character except
 * inside single quotes.
 *
 * @param string the input to tokenize; {@code null} yields an empty list
 * @return the list of tokens, in order of appearance
 */
public static List<String> shellSplit(CharSequence string) {
    final List<String> result = new ArrayList<>();
    if (string == null) {
        return result;
    }
    final StringBuilder token = new StringBuilder();
    boolean pendingEscape = false;
    boolean inQuotes = false;
    char activeQuote = ' ';
    final int length = string.length();
    for (int index = 0; index < length; index++) {
        final char ch = string.charAt(index);
        if (pendingEscape) {
            // Previous character was a backslash: take this one literally.
            token.append(ch);
            pendingEscape = false;
            continue;
        }
        final boolean inSingleQuotes = inQuotes && activeQuote == '\'';
        if (ch == '\\' && !inSingleQuotes) {
            pendingEscape = true;
        } else if (inQuotes && ch == activeQuote) {
            inQuotes = false;
        } else if (!inQuotes && (ch == '\'' || ch == '"')) {
            inQuotes = true;
            activeQuote = ch;
        } else if (!inQuotes && Character.isWhitespace(ch)) {
            // Unquoted whitespace flushes the current token, if any.
            if (token.length() > 0) {
                result.add(token.toString());
                token.setLength(0);
            }
        } else {
            token.append(ch);
        }
    }
    if (token.length() > 0) {
        result.add(token.toString());
    }
    return result;
}
@Test
public void normalTokens() {
    // Tabs and spaces both act as token separators.
    assertEquals(Arrays.asList("a", "bee", "cee"), StringUtils.shellSplit("a\tbee cee"));
}
/**
 * Renders a flow count as display text, e.g. "1 flow" or "5 flows".
 *
 * @param flows number of flows
 * @return formatted count with a singular/plural unit, or the empty string
 *         when the count is less than one
 */
public static String formatFlows(long flows) {
    if (flows < 1) {
        return EMPTY;
    }
    // Pick the singular or plural unit word.
    String unit = flows > 1 ? FLOWS : FLOW;
    return flows + SPACE + unit;
}
@Test
public void formatOneFlow() {
    // Exactly one flow uses the singular unit word.
    String f = TopoUtils.formatFlows(1);
    assertEquals(AM_WL, "1 flow", f);
}
/**
 * Routes a client event to the appropriate Distro sync path: verify-failure
 * events are re-synced to the single server that failed verification, all
 * other client events are broadcast to every peer. Standalone deployments
 * have no peers, so events are ignored entirely.
 *
 * @param event the client event to propagate
 */
@Override
public void onEvent(Event event) {
    // No peers to sync with when running standalone.
    if (EnvUtil.getStandaloneMode()) {
        return;
    }
    if (!(event instanceof ClientEvent.ClientVerifyFailedEvent)) {
        syncToAllServer((ClientEvent) event);
        return;
    }
    syncToVerifyFailedServer((ClientEvent.ClientVerifyFailedEvent) event);
}
@Test
void testOnClientVerifyFailedEventWithoutClient() {
    // If the client no longer exists locally there is nothing to sync: neither
    // the targeted re-sync nor a broadcast may be issued.
    when(clientManager.getClient(CLIENT_ID)).thenReturn(null);
    distroClientDataProcessor.onEvent(new ClientEvent.ClientVerifyFailedEvent(CLIENT_ID, MOCK_TARGET_SERVER));
    verify(distroProtocol, never()).syncToTarget(any(), any(), anyString(), anyLong());
    verify(distroProtocol, never()).sync(any(), any());
}
/**
 * Attempts to start a new memory-tier segment for the given subpartition.
 * All of the following must hold: the netty connection for the subpartition is
 * established, its queued-buffer count is below the per-subpartition cap, and
 * the memory manager has (and can reserve) enough non-reclaimable buffers for
 * the larger of the per-segment requirement and {@code minNumBuffers}.
 *
 * @param subpartitionId subpartition requesting the segment
 * @param segmentId      id to record on the producer agent when accepted
 * @param minNumBuffers  minimum buffers the caller needs for this segment
 * @return {@code true} if the segment was started
 */
@Override
public boolean tryStartNewSegment(
        TieredStorageSubpartitionId subpartitionId, int segmentId, int minNumBuffers) {
    boolean canStartNewSegment =
            // The downstream netty connection must already be established.
            nettyConnectionEstablished[subpartitionId.getSubpartitionId()]
                    // Ensure that a subpartition's memory tier does not excessively use
                    // buffers, which may result in insufficient buffers for other subpartitions
                    && subpartitionProducerAgents[subpartitionId.getSubpartitionId()]
                            .numQueuedBuffers() < subpartitionMaxQueuedBuffers
                    // Enough headroom must remain among non-reclaimable buffers...
                    && (memoryManager.getMaxNonReclaimableBuffers(getMemoryTierName())
                            - memoryManager.numOwnerRequestedBuffer(
                                    getMemoryTierName()))
                            > Math.max(numBuffersPerSegment, minNumBuffers)
                    // ...and the capacity must actually be reservable.
                    && memoryManager.ensureCapacity(
                            Math.max(numBuffersPerSegment, minNumBuffers));
    if (canStartNewSegment) {
        subpartitionProducerAgents[subpartitionId.getSubpartitionId()].updateSegmentId(
                segmentId);
    }
    return canStartNewSegment;
}
@Test
void testTryStartNewSegment() {
    try (MemoryTierProducerAgent memoryTierProducerAgent = createMemoryTierProducerAgent(false)) {
        // Before the netty connection is established, no segment may start.
        assertThat(memoryTierProducerAgent.tryStartNewSegment(SUBPARTITION_ID, 0, 0)).isFalse();
        memoryTierProducerAgent.connectionEstablished(
                SUBPARTITION_ID, new TestingNettyConnectionWriter.Builder().build());
        // With an established connection (and capacity available), it succeeds.
        assertThat(memoryTierProducerAgent.tryStartNewSegment(SUBPARTITION_ID, 0, 0)).isTrue();
    }
}
/**
 * Fetches a configuration value, delegating to the inner lookup with this
 * client's fixed namespace.
 *
 * @param dataId    configuration data id
 * @param group     configuration group
 * @param timeoutMs read timeout in milliseconds
 * @return the configuration content
 * @throws NacosException if the lookup fails or times out
 */
@Override
public String getConfig(String dataId, String group, long timeoutMs) throws NacosException {
    return getConfigInner(namespace, dataId, group, timeoutMs);
}
@Test
void testGetConfigFromServer() throws NacosException {
    final String dataId = "1";
    final String group = "2";
    final String tenant = "";
    final int timeout = 3000;
    // Stub the worker to return server-side content for the (dataId, group) pair.
    ConfigResponse response = new ConfigResponse();
    response.setContent("aa");
    response.setConfigType("bb");
    Mockito.when(mockWoker.getServerConfig(dataId, group, "", timeout, false)).thenReturn(response);
    final String config = nacosConfigService.getConfig(dataId, group, timeout);
    assertEquals("aa", config);
    // The service must hit the server exactly once with the empty tenant.
    Mockito.verify(mockWoker, Mockito.times(1)).getServerConfig(dataId, group, tenant, timeout, false);
}
/**
 * Validates that the SMS channel exists and is enabled.
 *
 * @param channelId id of the channel to validate
 * @return the channel when it exists and is enabled
 * @throws com.fuint.common.exception (service exception) SMS_CHANNEL_NOT_EXISTS
 *         when missing, SMS_CHANNEL_DISABLE when disabled
 */
@VisibleForTesting
public SmsChannelDO validateSmsChannel(Long channelId) {
    SmsChannelDO channel = smsChannelService.getSmsChannel(channelId);
    // Missing channel: fail fast before touching its status.
    if (channel == null) {
        throw exception(SMS_CHANNEL_NOT_EXISTS);
    }
    // A disabled channel cannot be used to send messages.
    if (CommonStatusEnum.isDisable(channel.getStatus())) {
        throw exception(SMS_CHANNEL_DISABLE);
    }
    return channel;
}
@Test
public void testValidateSmsChannel_notExists() {
    // Prepare a random channel id that was never inserted.
    Long channelId = randomLongId();
    // Validating an unknown channel must raise SMS_CHANNEL_NOT_EXISTS.
    assertServiceException(() -> smsTemplateService.validateSmsChannel(channelId), SMS_CHANNEL_NOT_EXISTS);
}
/**
 * Returns the conversion function that maps a value of type {@code T} to a
 * Beam {@link Row} according to this schema.
 */
public SerializableFunction<T, Row> getToRowFunction() {
    return toRowFunction;
}
@Test
public void testNonContiguousOneOfProtoToRow() throws InvalidProtocolBufferException {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(NonContiguousOneOf.getDescriptor());
    SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
    // equality doesn't work between dynamic messages and other,
    // so we compare string representation
    assertEquals(
        NONCONTIGUOUS_ONEOF_ROW.toString(),
        toRow.apply(toDynamic(NONCONTIGUOUS_ONEOF_PROTO)).toString());
}
/**
 * Demonstrates the bytecode-interpreter pattern: a small stack-based virtual
 * machine executes hand-assembled instructions against two wizard objects.
 * The instruction sequence computes (presumably — confirm against the
 * instruction set docs) wizard 0's new health from its current health plus
 * the average of its agility and wisdom, then stores it back.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    var vm = new VirtualMachine(
        new Wizard(45, 7, 11, 0, 0),
        new Wizard(36, 18, 8, 0, 0));
    // Push wizard index 0 twice: once for the SET target, once for the GET.
    vm.execute(InstructionConverterUtil.convertToByteCode(LITERAL_0));
    vm.execute(InstructionConverterUtil.convertToByteCode(LITERAL_0));
    vm.execute(InstructionConverterUtil.convertToByteCode(String.format(HEALTH_PATTERN, "GET")));
    vm.execute(InstructionConverterUtil.convertToByteCode(LITERAL_0));
    vm.execute(InstructionConverterUtil.convertToByteCode(GET_AGILITY));
    vm.execute(InstructionConverterUtil.convertToByteCode(LITERAL_0));
    vm.execute(InstructionConverterUtil.convertToByteCode(GET_WISDOM));
    // (agility + wisdom) / 2, then added to current health.
    vm.execute(InstructionConverterUtil.convertToByteCode(ADD));
    vm.execute(InstructionConverterUtil.convertToByteCode(LITERAL_2));
    vm.execute(InstructionConverterUtil.convertToByteCode(DIVIDE));
    vm.execute(InstructionConverterUtil.convertToByteCode(ADD));
    vm.execute(InstructionConverterUtil.convertToByteCode(String.format(HEALTH_PATTERN, "SET")));
}
@Test
void shouldExecuteApplicationWithoutException() {
    // Smoke test: the demo application must run end-to-end without throwing.
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Marks the version bump applied by this versioner as committed, so it will
 * not be rolled back.
 */
public void commitVersion() {
    isVersionCommitted = true;
}
@Test
void testJobVersionerOnCommitVersionIsIncreased() {
    // GIVEN
    Job job = aScheduledJob().withVersion(5).build();
    // WHEN
    JobVersioner jobVersioner = new JobVersioner(job);
    // THEN — constructing the versioner bumps the version immediately.
    assertThat(job).hasVersion(6);
    // WHEN
    jobVersioner.commitVersion();
    // THEN — committing keeps the bumped version; it does not bump again.
    assertThat(job).hasVersion(6);
}
/**
 * Handles a field command: reads the sub-command from the reader, dispatches
 * to the getter or setter handler, and writes the resulting protocol reply.
 * Unknown sub-commands produce a protocol error reply instead of throwing.
 *
 * @param commandName the command name that routed here (unused in dispatch)
 * @param reader      source of the sub-command and its arguments
 * @param writer      destination for the protocol reply
 * @throws Py4JException on protocol-level failures
 * @throws IOException   on stream failures
 */
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
        throws Py4JException, IOException {
    String subCommand = safeReadLine(reader, false);
    final String returnCommand;
    if (subCommand.equals(FIELD_GET_SUB_COMMAND_NAME)) {
        returnCommand = getField(reader);
    } else if (subCommand.equals(FIELD_SET_SUB_COMMAND_NAME)) {
        returnCommand = setField(reader);
    } else {
        // Unrecognized sub-command: reply with a protocol error rather than failing.
        returnCommand = Protocol.getOutputErrorCommand("Unknown Field SubCommand Name: " + subCommand);
    }
    logger.finest("Returning command: " + returnCommand);
    writer.write(returnCommand);
    writer.flush();
}
@Test
public void testObject() {
    // "g" = get sub-command, target object id, field name, "e" = end marker.
    String inputCommand = "g\n" + target + "\nfield20\ne\n";
    try {
        command.execute("f", new BufferedReader(new StringReader(inputCommand)), writer);
        // "!yro1" = success reply referencing returned object ro1.
        assertEquals("!yro1\n", sWriter.toString());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
@VisibleForTesting void validateClientIdExists(Long id, String clientId) { OAuth2ClientDO client = oauth2ClientMapper.selectByClientId(clientId); if (client == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的客户端 if (id == null) { throw exception(OAUTH2_CLIENT_EXISTS); } if (!client.getId().equals(id)) { throw exception(OAUTH2_CLIENT_EXISTS); } }
@Test
public void testValidateClientIdExists_withId() {
    // Insert a client that already owns clientId "tudou".
    OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("tudou");
    oauth2ClientMapper.insert(client);
    // Use a random id that differs from the existing client's id.
    Long id = randomLongId();
    String clientId = "tudou";
    // A different client with the same clientId must be rejected as a duplicate.
    assertServiceException(() -> oauth2ClientService.validateClientIdExists(id, clientId), OAUTH2_CLIENT_EXISTS);
}
/**
 * Returns the signature component parsed from the authorization header.
 */
public String getSignature() {
    return this.signature;
}
@Test
public void testGetSignature() {
    // The fixture header carries this base64 signature value.
    Assert.assertEquals("EjRWeJA=", authorizationHeader.getSignature());
}
/**
 * Expands a data node over replacement data sources. When the node's data
 * source has replacement entries in {@code dataSources}, one node per
 * replacement (same table) is produced; otherwise the node itself is returned.
 *
 * @param dataNode    original data node
 * @param dataSources mapping from logical data source name to replacements
 * @return the expanded data nodes, preserving replacement order
 */
public static Collection<DataNode> buildDataNode(final DataNode dataNode, final Map<String, Collection<String>> dataSources) {
    if (!dataSources.containsKey(dataNode.getDataSourceName())) {
        // No replacement configured: keep the original node as-is.
        return Collections.singletonList(dataNode);
    }
    Collection<DataNode> expanded = new LinkedList<>();
    for (String replacement : dataSources.get(dataNode.getDataSourceName())) {
        expanded.add(new DataNode(replacement, dataNode.getTableName()));
    }
    return expanded;
}
@Test
void assertBuildDataNodeWithoutSameDataSource() {
    // The node's data source ("read_ds") has no entry in the mapping
    // (keyed "readwrite_ds"), so the original node must pass through unchanged.
    DataNode dataNode = new DataNode("read_ds.t_order");
    Collection<DataNode> dataNodes = DataNodeUtils.buildDataNode(dataNode, Collections.singletonMap("readwrite_ds", Arrays.asList("ds_0", "shadow_ds_0")));
    assertThat(dataNodes.size(), is(1));
    assertThat(dataNodes.iterator().next().getDataSourceName(), is("read_ds"));
}
/**
 * Formats the current instant as an HTTP-style GMT date string using the
 * shared formatter and GMT zone constants.
 */
public static String gmtDate() {
    return GMT_FMT.format(LocalDateTime.now().atZone(GMT_ZONE_ID));
}
@Test
public void testGmtDate() {
    // Fixed Date and LocalDateTime fixtures format to known GMT strings.
    String gmt = DateKit.gmtDate(date);
    Assert.assertEquals("Wed, 20 Sep 2017 15:27:50 GMT", gmt);
    gmt = DateKit.gmtDate(localDateTime);
    Assert.assertEquals("Wed, 20 Sep 2017 15:20:10 GMT", gmt);
    // The no-arg overload uses "now", so only non-null can be asserted.
    Assert.assertNotNull(DateKit.gmtDate());
}
/**
 * Returns the remaining delay until this task list expires, clamped at zero,
 * converted into the requested time unit.
 *
 * @param unit unit to express the delay in
 * @return remaining delay, never negative
 */
@Override
public long getDelay(TimeUnit unit) {
    // Clamp to zero so an already-expired list reports no remaining delay.
    final long remainingMs = Math.max(getExpiration() - time.hiResClockMs(), 0);
    return unit.convert(remainingMs, TimeUnit.MILLISECONDS);
}
@Test
public void testGetDelay() {
    MockTime time = new MockTime();
    TimerTaskList list = new TimerTaskList(new AtomicInteger(0), time);
    // Expire 10s in the future, advance the clock 5s: 5s should remain.
    list.setExpiration(time.hiResClockMs() + 10000L);
    time.sleep(5000L);
    assertEquals(5L, list.getDelay(TimeUnit.SECONDS));
}
/**
 * Checks existence via a security-scoped bookmark lock when available.
 * Falls back to the superclass check when the lock cannot be obtained or
 * access is denied. The lock is always released in the finally block
 * (release must tolerate a null argument).
 *
 * @param options link options forwarded to the fallback check
 * @return {@code true} if the file exists
 */
@Override
public boolean exists(final LinkOption... options) {
    NSURL resolved = null;
    try {
        resolved = this.lock(false);
        if(null == resolved) {
            // No scoped URL available: defer to the default implementation.
            return super.exists(options);
        }
        return Files.exists(Paths.get(resolved.path()));
    }
    catch(AccessDeniedException e) {
        // Sandbox denied access to the scoped URL: fall back as well.
        return super.exists(options);
    }
    finally {
        this.release(resolved);
    }
}
@Test
public void testMoveFolder() throws Exception {
    // Create a directory, rename it, then verify the old name is gone
    // and the new name exists.
    final String name = UUID.randomUUID().toString();
    final String newname = UUID.randomUUID().toString();
    new DefaultLocalDirectoryFeature().mkdir(new FinderLocal(name));
    new FinderLocal(name).rename(new FinderLocal(newname));
    assertFalse(new FinderLocal(name).exists());
    assertTrue(new FinderLocal(newname).exists());
}
/**
 * Deserializes a coordinator record from its key and (optional) value buffers.
 * The key's leading version short selects the record type; a null value
 * buffer denotes a tombstone and yields a record without a value.
 *
 * @param keyBuffer   buffer holding the versioned key
 * @param valueBuffer buffer holding the versioned value, or null for tombstones
 * @return the decoded record
 * @throws RuntimeException if the record type is unknown or decoding fails
 */
@Override
public CoordinatorRecord deserialize(
    ByteBuffer keyBuffer,
    ByteBuffer valueBuffer
) throws RuntimeException {
    // The key's version doubles as the record-type discriminator.
    final short recordType = readVersion(keyBuffer, "key");
    final ApiMessage keyMessage = apiMessageKeyFor(recordType);
    readMessage(keyMessage, keyBuffer, recordType, "key");
    if (valueBuffer == null) {
        // Tombstone: key only, no value payload.
        return new CoordinatorRecord(new ApiMessageAndVersion(keyMessage, recordType), null);
    }
    final ApiMessage valueMessage = apiMessageValueFor(recordType);
    final short valueVersion = readVersion(valueBuffer, "value");
    readMessage(valueMessage, valueBuffer, valueVersion, "value");
    return new CoordinatorRecord(
        new ApiMessageAndVersion(keyMessage, recordType),
        new ApiMessageAndVersion(valueMessage, valueVersion)
    );
}
@Test
public void testDeserializeWithInvalidRecordType() {
    GroupCoordinatorRecordSerde serde = new GroupCoordinatorRecordSerde();
    // Key whose version short (255) maps to no known record type.
    ByteBuffer keyBuffer = ByteBuffer.allocate(64);
    keyBuffer.putShort((short) 255);
    keyBuffer.rewind();
    ByteBuffer valueBuffer = ByteBuffer.allocate(64);
    CoordinatorLoader.UnknownRecordTypeException ex =
        assertThrows(CoordinatorLoader.UnknownRecordTypeException.class,
            () -> serde.deserialize(keyBuffer, valueBuffer));
    // The exception must carry the offending type id for diagnostics.
    assertEquals((short) 255, ex.unknownType());
}
/**
 * Returns the indices of nodes that should be marked down for the given
 * cluster state, as computed by the availability calculation.
 */
public Set<Integer> nodesThatShouldBeDown(ClusterState state) {
    return calculate(state).nodesThatShouldBeDown();
}
@Test
void non_uniform_group_sizes_are_supported() {
    // Hierarchic cluster with group sizes 1, 2, 3, 4 and a 67% availability floor.
    GroupAvailabilityCalculator calc = calcForHierarchicCluster(
        DistributionBuilder.withGroupNodes(1, 2, 3, 4), 0.67);
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:10 storage:10")), equalTo(emptySet()));
    // Group 0 has only 1 node and should not cause any other nodes to be taken down
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:10 storage:10 .0.s:d")), equalTo(emptySet()));
    // Too little availability in group 1
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:10 storage:10 .1.s:d")), equalTo(indices(2)));
    // Too little availability in group 2
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:10 storage:10 .3.s:d")), equalTo(indices(4, 5)));
    // Group 4 has 75% availability (>= 67%), so no auto take-down there
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:10 storage:10 .7.s:d")), equalTo(emptySet()));
    // Drop group 4 availability to 50%; it should now be taken down entirely
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:10 storage:9 .7.s:d")), equalTo(indices(6, 8)));
}
/**
 * Records an error observation in the distinct-error log. Identical errors
 * share one entry whose count and last-seen timestamp are updated; new
 * distinct errors allocate a fresh entry. Only entry creation is done under
 * the lock — the count/timestamp updates use atomic/ordered buffer writes.
 *
 * @param observation the error to record
 * @return {@code false} if the log had insufficient space for a new entry
 */
public boolean record(final Throwable observation) {
    final long timestampMs;
    DistinctObservation distinctObservation;
    timestampMs = clock.time();
    synchronized (this) {
        // Look up an existing entry for this error; allocate one if absent.
        distinctObservation = find(distinctObservations, observation);
        if (null == distinctObservation) {
            distinctObservation = newObservation(timestampMs, observation);
            if (INSUFFICIENT_SPACE == distinctObservation) {
                return false;
            }
        }
    }
    final int offset = distinctObservation.offset;
    // Lock-free update: bump the occurrence count and publish the timestamp
    // with ordered semantics so readers observe a consistent entry.
    buffer.getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1);
    buffer.putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampMs);
    return true;
}
@Test
void shouldRecordFirstObservation() {
    final long timestamp = 7;
    final int offset = 0;
    final RuntimeException error = new RuntimeException("Test Error");
    when(clock.time()).thenReturn(timestamp);
    assertTrue(log.record(error));
    // A first observation must write the encoded error, first-seen timestamp,
    // and length before bumping the count and last-seen timestamp — in order.
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).putBytes(eq(offset + ENCODED_ERROR_OFFSET), any(byte[].class));
    inOrder.verify(buffer).putLong(offset + FIRST_OBSERVATION_TIMESTAMP_OFFSET, timestamp);
    inOrder.verify(buffer).putIntOrdered(eq(offset + LENGTH_OFFSET), anyInt());
    inOrder.verify(buffer).getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1);
    inOrder.verify(buffer).putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestamp);
}
/**
 * Loads a {@link SolrSinkConfig} from a YAML file on disk.
 *
 * @param yamlFile path to the YAML configuration file
 * @return the parsed configuration
 * @throws IOException if the file cannot be read or parsed
 */
public static SolrSinkConfig load(String yamlFile) throws IOException {
    final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());
    return yamlMapper.readValue(new File(yamlFile), SolrSinkConfig.class);
}
@Test
public final void loadFromYamlFileTest() throws IOException {
    // Parse the fixture YAML and verify every field was bound.
    File yamlFile = getFile("sinkConfig.yaml");
    String path = yamlFile.getAbsolutePath();
    SolrSinkConfig config = SolrSinkConfig.load(path);
    assertNotNull(config);
    assertEquals(config.getSolrUrl(), "localhost:2181,localhost:2182/chroot");
    assertEquals(config.getSolrMode(), "SolrCloud");
    assertEquals(config.getSolrCollection(), "techproducts");
    assertEquals(config.getSolrCommitWithinMs(), Integer.parseInt("100"));
    assertEquals(config.getUsername(), "fakeuser");
    assertEquals(config.getPassword(), "fake@123");
}
/**
 * Returns the raw value bytes of this data object, or {@code null} when
 * no value has been set.
 */
@Nullable
public byte[] getValue() {
    return mValue;
}
@Test
public void setValue_SINT8() {
    final MutableData data = new MutableData(new byte[1]);
    // -120 as a signed 8-bit value is two's-complement 0x88.
    data.setValue(-120, Data.FORMAT_SINT8, 0);
    assertArrayEquals(new byte[] { (byte) 0x88 } , data.getValue());
}
/**
 * Replaces this task list's contents from a UI attribute map. The map's
 * {@code TASK_OPTIONS} entry names the task type; the entry keyed by that
 * type holds the task's own attributes. A null attribute map clears the list.
 *
 * @param attributes  attribute map (expected to be a {@link Map}), may be null
 * @param taskFactory factory used to instantiate the selected task type
 * @throws IllegalArgumentException if {@code taskFactory} is null
 */
public void setConfigAttributes(Object attributes, TaskFactory taskFactory) {
    clear();
    if (attributes == null) {
        return;
    }
    if (taskFactory == null) {
        // Fixed message: the null argument here is the task factory, not a
        // "ConfigContext" as the previous message claimed.
        throw new IllegalArgumentException("TaskFactory cannot be null");
    }
    Map attributeMap = (Map) attributes;
    String taskType = (String) attributeMap.get(TASK_OPTIONS);
    Task task = taskFactory.taskInstanceFor(taskType);
    task.setConfigAttributes(attributeMap.get(taskType), taskFactory);
    add(task);
}
@Test
public void shouldSetConfigAttributesForBuiltinTask() throws Exception {
    // Attribute map selecting the built-in "ant" task with its own attributes.
    HashMap attributes = new HashMap();
    attributes.put(Tasks.TASK_OPTIONS, "ant");
    attributes.put("ant", antTaskAttribs("build.xml", "test", "foo"));
    TaskFactory taskFactory = mock(TaskFactory.class);
    AntTask antTask = new AntTask();
    when(taskFactory.taskInstanceFor(antTask.getTaskType())).thenReturn(antTask);
    Tasks tasks = new Tasks();
    Tasks spy = spy(tasks);
    spy.setConfigAttributes(attributes, taskFactory);
    // Exactly one task must be added, configured from the nested attributes.
    assertThat(spy.size(), is(1));
    assertThat(spy.get(0), is(antTask("build.xml", "test", "foo")));
}
/**
 * Removes the entry with the given key and returns its value.
 *
 * NOTE(review): this is an unimplemented stub — it always returns null,
 * even when the key exists. The corresponding unit test expects the removed
 * value to be returned, so it will fail until removal is implemented.
 *
 * @param key key of the entry to remove
 * @return the removed value, or null if absent (currently always null)
 */
@Override
public V remove(K key) {
    if (!isEmpty()) {
        // Deletion implementation goes here.
        // (Translated from the original Spanish placeholder comment.)
    }
    return null;
}
@Test
public void testRemove() {
    BinarySearchTree<Integer, String> tree = new BinarySearchTree<>(Comparator.naturalOrder());
    tree.insert(5, "Five");
    tree.insert(3, "Three");
    tree.insert(7, "Seven");
    tree.insert(1, "One");
    // Removing the root must return its value and make it unfindable.
    assertEquals("Five", tree.remove(5));
    assertNull(tree.find(5));
    // Removing a leaf must behave the same way.
    assertEquals("One", tree.remove(1));
    assertNull(tree.find(1));
}
/**
 * Creates a Hoodie table sink from the catalog table's options and schema.
 * Validation and option setup are order-dependent: the table path must be
 * present before table-level options are loaded, and the schema sanity check
 * runs before derived configuration options are filled in.
 *
 * @param context factory context carrying the catalog table and identifiers
 * @return the configured {@link HoodieTableSink}
 */
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    Configuration conf = FlinkOptions.fromMap(context.getCatalogTable().getOptions());
    checkArgument(!StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.PATH)),
        "Option [path] should not be empty.");
    // Pulls table-level options (e.g. from hoodie.properties) into the config.
    setupTableOptions(conf.getString(FlinkOptions.PATH), conf);
    ResolvedSchema schema = context.getCatalogTable().getResolvedSchema();
    sanityCheck(conf, schema);
    setupConfOptions(conf, context.getObjectIdentifier(), context.getCatalogTable(), schema);
    setupSortOptions(conf, context.getConfiguration());
    return new HoodieTableSink(conf, schema);
}
@Test
void testTableTypeCheck() {
    ResolvedSchema schema = SchemaBuilder.instance()
        .field("f0", DataTypes.INT().notNull())
        .field("f1", DataTypes.VARCHAR(20))
        .field("f2", DataTypes.TIMESTAMP(3))
        .field("ts", DataTypes.TIMESTAMP(3))
        .primaryKey("f0")
        .build();
    // Table type unset. The default value will be ok
    final MockContext sourceContext1 = MockContext.getInstance(this.conf, schema, "f2");
    assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext1));
    // Invalid table type will throw exception if the hoodie.properties does not exist.
    this.conf.setString(FlinkOptions.PATH, tempFile.getAbsolutePath() + "_NOT_EXIST_TABLE_PATH");
    this.conf.set(FlinkOptions.TABLE_TYPE, "INVALID_TABLE_TYPE");
    final MockContext sourceContext2 = MockContext.getInstance(this.conf, schema, "f2");
    assertThrows(HoodieValidationException.class,
        () -> new HoodieTableFactory().createDynamicTableSink(sourceContext2));
    this.conf.setString(FlinkOptions.PATH, tempFile.getAbsolutePath());
    // Invalid table type will be ok if the hoodie.properties exists.
    this.conf.set(FlinkOptions.TABLE_TYPE, "INVALID_TABLE_TYPE");
    final MockContext sourceContext3 = MockContext.getInstance(this.conf, schema, "f2");
    assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext3));
    // Valid table type will be ok
    this.conf.set(FlinkOptions.TABLE_TYPE, "MERGE_ON_READ");
    final MockContext sourceContext4 = MockContext.getInstance(this.conf, schema, "f2");
    assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext4));
    // Setup the table type correctly for hoodie.properties
    HoodieTableSink hoodieTableSink =
        (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sourceContext4);
    // The persisted table type in hoodie.properties wins over the option value.
    assertThat(hoodieTableSink.getConf().get(FlinkOptions.TABLE_TYPE), is("COPY_ON_WRITE"));
    // Valid table type will be ok
    this.conf.set(FlinkOptions.TABLE_TYPE, "COPY_ON_WRITE");
    final MockContext sourceContext5 = MockContext.getInstance(this.conf, schema, "f2");
    assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext5));
}
/**
 * Interprets the HTTP status of the checksum-file download.
 * 404: the checksum file is absent — remove any stale local copy and warn.
 * 304: the file is unchanged — nothing to download. 200: saved successfully.
 * Any other status succeeds only if it is below 400.
 *
 * @param returncode  HTTP status code of the download attempt
 * @param goPublisher publisher used to surface warnings to the build output
 * @return {@code true} when the result is acceptable
 */
@Override
public boolean handleResult(int returncode, GoPublisher goPublisher) {
    switch (returncode) {
        case HttpURLConnection.HTTP_NOT_FOUND:
            // No checksum on the server: drop any stale local file and warn.
            deleteQuietly(checksumFile);
            goPublisher.taggedConsumeLineWithPrefix(GoPublisher.ERR,
                    "[WARN] The md5checksum property file was not found on the server. Hence, Go can not verify the integrity of the artifacts.");
            return true;
        case HttpURLConnection.HTTP_NOT_MODIFIED:
            LOG.info("[Agent Fetch Artifact] Not downloading checksum file as it has not changed");
            return true;
        case HttpURLConnection.HTTP_OK:
            LOG.info("[Agent Fetch Artifact] Saved checksum property file [{}]", checksumFile);
            return true;
        default:
            // Any non-error (< 400) status is treated as success.
            return returncode < HttpURLConnection.HTTP_BAD_REQUEST;
    }
}
@Test
public void shouldHandleResultIfHttpCodeSaysFilePermissionDenied() {
    // 403 is >= 400, so the handler must report failure.
    StubGoPublisher goPublisher = new StubGoPublisher();
    assertThat(checksumFileHandler.handleResult(HttpServletResponse.SC_FORBIDDEN, goPublisher), is(false));
}
/**
 * Creates the next processing span for a JMS message, continuing any trace
 * context carried in the message's properties (which are consumed and
 * cleared by the extractor). When no upstream context was present and the
 * span is sampled, queue/topic tags are added for lookup.
 *
 * @param message the JMS message being processed
 * @return the span to use for processing this message
 */
public Span nextSpan(Message message) {
    TraceContextOrSamplingFlags extracted =
        extractAndClearTraceIdProperties(processorExtractor, message, message);
    Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
    // When an upstream context was not present, lookup keys are unlikely added
    if (extracted.context() == null && !result.isNoop()) {
        // simplify code by re-using an existing MessagingRequest impl
        tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
    }
    return result;
}
@Test
void nextSpan_should_use_span_from_headers_as_parent() {
    // Single-header B3 format: traceId-spanId-sampled; spanId 2 becomes the parent.
    setStringProperty(message, "b3", "0000000000000001-0000000000000002-1");
    Span span = jmsTracing.nextSpan(message);
    assertThat(span.context().parentId()).isEqualTo(2L);
}
/**
 * Resolves the declared and generic return types of the invoked method.
 * Results cached on an {@link RpcInvocation} are returned directly; otherwise
 * the method is looked up reflectively via the invoker URL's service
 * interface. Generic service calls and internal ($-prefixed) methods are
 * skipped. Any failure is logged and yields {@code null} rather than
 * propagating — callers must handle a null result.
 *
 * @param invocation the invocation to inspect; may be null
 * @return a two-element array [returnType, genericReturnType], or null
 */
public static Type[] getReturnTypes(Invocation invocation) {
    try {
        if (invocation != null
                && invocation.getInvoker() != null
                && invocation.getInvoker().getUrl() != null
                && invocation.getInvoker().getInterface() != GenericService.class
                && !invocation.getMethodName().startsWith("$")) {
            Type[] returnTypes = null;
            if (invocation instanceof RpcInvocation) {
                // Fast path: reuse types cached on the invocation.
                returnTypes = ((RpcInvocation) invocation).getReturnTypes();
                if (returnTypes != null) {
                    return returnTypes;
                }
            }
            String service = invocation.getInvoker().getUrl().getServiceInterface();
            if (StringUtils.isNotEmpty(service)) {
                // Slow path: reflect on the service interface's method.
                Method method = getMethodByService(invocation, service);
                if (method != null) {
                    returnTypes = ReflectUtils.getReturnTypes(method);
                }
            }
            if (returnTypes != null) {
                return returnTypes;
            }
        }
    } catch (Throwable t) {
        // Best-effort: reflection failures must not break the invocation itself.
        logger.warn(COMMON_REFLECTIVE_OPERATION_FAILED, "", "", t.getMessage(), t);
    }
    return null;
}
@Test
void testGetReturnTypesUseCache() throws Exception {
    Class<?> demoServiceClass = DemoService.class;
    String serviceName = demoServiceClass.getName();
    Invoker invoker = createMockInvoker(
            URL.valueOf(
                    "test://127.0.0.1:1/org.apache.dubbo.rpc.support.DemoService?interface=org.apache.dubbo.rpc.support.DemoService"));

    // Plain return type: both slots hold the raw type.
    RpcInvocation inv = new RpcInvocation(
            "testReturnType", serviceName, "", new Class<?>[] {String.class}, null, null, invoker, null);
    Type[] types = RpcUtils.getReturnTypes(inv);
    Assertions.assertNotNull(types);
    Assertions.assertEquals(2, types.length);
    Assertions.assertEquals(String.class, types[0]);
    Assertions.assertEquals(String.class, types[1]);
    // The resolved types must now be readable from the invocation itself (cache).
    Assertions.assertArrayEquals(types, inv.getReturnTypes());

    // Parameterized return type: raw class + generic type.
    RpcInvocation inv1 = new RpcInvocation(
            "testReturnType1", serviceName, "", new Class<?>[] {String.class}, null, null, invoker, null);
    java.lang.reflect.Type[] types1 = RpcUtils.getReturnTypes(inv1);
    Assertions.assertNotNull(types1);
    Assertions.assertEquals(2, types1.length);
    Assertions.assertEquals(List.class, types1[0]);
    Assertions.assertEquals(
            demoServiceClass.getMethod("testReturnType1", String.class).getGenericReturnType(), types1[1]);
    Assertions.assertArrayEquals(types1, inv1.getReturnTypes());

    RpcInvocation inv2 = new RpcInvocation(
            "testReturnType2", serviceName, "", new Class<?>[] {String.class}, null, null, invoker, null);
    java.lang.reflect.Type[] types2 = RpcUtils.getReturnTypes(inv2);
    Assertions.assertNotNull(types2);
    Assertions.assertEquals(2, types2.length);
    Assertions.assertEquals(String.class, types2[0]);
    Assertions.assertEquals(String.class, types2[1]);
    Assertions.assertArrayEquals(types2, inv2.getReturnTypes());

    // Future-wrapped parameterized type: the wrapped type argument is unpacked.
    RpcInvocation inv3 = new RpcInvocation(
            "testReturnType3", serviceName, "", new Class<?>[] {String.class}, null, null, invoker, null);
    java.lang.reflect.Type[] types3 = RpcUtils.getReturnTypes(inv3);
    Assertions.assertNotNull(types3);
    Assertions.assertEquals(2, types3.length);
    Assertions.assertEquals(List.class, types3[0]);
    java.lang.reflect.Type genericReturnType3 =
            demoServiceClass.getMethod("testReturnType3", String.class).getGenericReturnType();
    Assertions.assertEquals(((ParameterizedType) genericReturnType3).getActualTypeArguments()[0], types3[1]);
    Assertions.assertArrayEquals(types3, inv3.getReturnTypes());

    // Unresolvable return type: both slots are null but still cached.
    RpcInvocation inv4 = new RpcInvocation(
            "testReturnType4", serviceName, "", new Class<?>[] {String.class}, null, null, invoker, null);
    java.lang.reflect.Type[] types4 = RpcUtils.getReturnTypes(inv4);
    Assertions.assertNotNull(types4);
    Assertions.assertEquals(2, types4.length);
    Assertions.assertNull(types4[0]);
    Assertions.assertNull(types4[1]);
    Assertions.assertArrayEquals(types4, inv4.getReturnTypes());

    RpcInvocation inv5 = new RpcInvocation(
            "testReturnType5", serviceName, "", new Class<?>[] {String.class}, null, null, invoker, null);
    java.lang.reflect.Type[] types5 = RpcUtils.getReturnTypes(inv5);
    Assertions.assertNotNull(types5);
    Assertions.assertEquals(2, types5.length);
    Assertions.assertEquals(Map.class, types5[0]);
    java.lang.reflect.Type genericReturnType5 =
            demoServiceClass.getMethod("testReturnType5", String.class).getGenericReturnType();
    Assertions.assertEquals(((ParameterizedType) genericReturnType5).getActualTypeArguments()[0], types5[1]);
    Assertions.assertArrayEquals(types5, inv5.getReturnTypes());
}
/**
 * Checks whether the current principal may act on the given transaction:
 * either it owns the transaction, or it is a super-user (only consulted when
 * both authentication and authorization are enabled).
 */
private CompletableFuture<Boolean> verifyTxnOwnership(TxnID txnID) {
    assert ctx.executor().inEventLoop();
    return service.pulsar().getTransactionMetadataStoreService()
            .verifyTxnOwnership(txnID, getPrincipal())
            .thenComposeAsync(owner -> {
                if (owner) {
                    return CompletableFuture.completedFuture(true);
                }
                // Non-owners fall back to the super-user check when auth is on.
                return (service.isAuthenticationEnabled() && service.isAuthorizationEnabled())
                        ? isSuperUser()
                        : CompletableFuture.completedFuture(false);
            }, ctx.executor());
}
@Test(timeOut = 30000)
public void sendEndTxnResponse() throws Exception {
    // Stub the transaction metadata store so ownership checks and the commit
    // itself all succeed immediately.
    final TransactionMetadataStoreService txnStore = mock(TransactionMetadataStoreService.class);
    when(txnStore.getTxnMeta(any())).thenReturn(CompletableFuture.completedFuture(mock(TxnMeta.class)));
    when(txnStore.verifyTxnOwnership(any(), any())).thenReturn(CompletableFuture.completedFuture(true));
    when(txnStore.endTransaction(any(TxnID.class), anyInt(), anyBoolean()))
            .thenReturn(CompletableFuture.completedFuture(null));
    when(pulsar.getTransactionMetadataStoreService()).thenReturn(txnStore);
    // The coordinator must be enabled BEFORE the channel is (re)created.
    svcConfig.setTransactionCoordinatorEnabled(true);
    resetChannel();
    setChannelConnected();

    // End (commit) txn id (most=12, least=1) with request id 89.
    ByteBuf clientCommand = Commands.serializeWithSize(Commands.newEndTxn(89L, 1L, 12L, TxnAction.COMMIT));
    channel.writeInbound(clientCommand);

    // The response must echo the request/txn ids and carry no error.
    CommandEndTxnResponse response = (CommandEndTxnResponse) getResponse();
    assertEquals(response.getRequestId(), 89L);
    assertEquals(response.getTxnidLeastBits(), 1L);
    assertEquals(response.getTxnidMostBits(), 12L);
    assertFalse(response.hasError());
    assertFalse(response.hasMessage());
    channel.finish();
}
@Operation(summary = "Gets the status of ongoing database migrations, if any", description = "Return the detailed status of ongoing database migrations"
    + " including starting date. If no migration is ongoing or needed it is still possible to call this endpoint and receive appropriate information.")
@GetMapping
public DatabaseMigrationsResponse getStatus() {
    // Without a readable DB version there is no connection to report on.
    Optional<Long> currentVersion = databaseVersion.getVersion();
    checkState(currentVersion.isPresent(), NO_CONNECTION_TO_DB);
    DatabaseVersion.Status status = databaseVersion.getStatus();
    if (status == DatabaseVersion.Status.UP_TO_DATE || status == DatabaseVersion.Status.REQUIRES_DOWNGRADE) {
        // No upgrade is applicable; report the current migration state as-is.
        return new DatabaseMigrationsResponse(databaseMigrationState);
    } else if (!database.getDialect().supportsMigration()) {
        // Upgrade is needed, but this database dialect cannot run migrations.
        return new DatabaseMigrationsResponse(DatabaseMigrationState.Status.STATUS_NOT_SUPPORTED);
    } else {
        return switch (databaseMigrationState.getStatus()) {
            // A migration is/was in flight: expose its detailed state.
            case RUNNING, FAILED, SUCCEEDED -> new DatabaseMigrationsResponse(databaseMigrationState);
            // Nothing started yet, but the schema is behind: one is required.
            case NONE -> new DatabaseMigrationsResponse(DatabaseMigrationState.Status.MIGRATION_REQUIRED);
            default -> throw new IllegalArgumentException(UNSUPPORTED_DATABASE_MIGRATION_STATUS);
        };
    }
}
@Test
void getStatus_whenDowngradeRequired_returnNone() throws Exception {
    // A schema newer than the server still reports "no migration needed".
    when(databaseVersion.getStatus()).thenReturn(DatabaseVersion.Status.REQUIRES_DOWNGRADE);
    when(migrationState.getStatus()).thenReturn(NONE);

    mockMvc.perform(get(DATABASE_MIGRATIONS_ENDPOINT))
            .andExpectAll(
                    status().isOk(),
                    content().json("{\"status\":\"NO_MIGRATION\",\"message\":\"Database is up-to-date, no migration needed.\"}"));
}
/**
 * Returns the {@link LogChannelFileWriterBuffer} responsible for the given log
 * channel id: the buffer registered directly under that id when present,
 * otherwise a buffer whose log-channel children contain the id (picked by
 * {@code determineLogChannelFileWriterBuffer} when several qualify).
 */
public LogChannelFileWriterBuffer getLogChannelFileWriterBuffer( String id ) {
    synchronized ( syncObject ) {
      // Direct hit: the id itself owns a writer buffer.
      LogChannelFileWriterBuffer fileWriterBuffer = this.fileWriterBuffers.get( id );
      if ( fileWriterBuffer != null ) {
        return fileWriterBuffer;
      }

      // Otherwise collect every registered buffer whose channel subtree
      // contains the requested id, along with that subtree.
      ConcurrentHashMap<LogChannelFileWriterBuffer, List<String>> possibleWriters = new ConcurrentHashMap<>();
      for ( Map.Entry<String, LogChannelFileWriterBuffer> entry : this.fileWriterBuffers.entrySet() ) {
        final String bufferId = entry.getKey();
        List<String> logChannelChildren = getLogChannelChildren( bufferId );
        if ( logChannelChildren.contains( id ) ) {
          possibleWriters.put( entry.getValue(), logChannelChildren );
        }
      }

      // Let the tie-break logic choose deterministically among candidates.
      return determineLogChannelFileWriterBuffer( possibleWriters );
    }
}
@Test
public void getLogChannelFileWriterBufferTest() {
    // Seed the registry with a fixed set of writer buffers and channel tree.
    LoggingRegistry loggingRegistry = LoggingRegistry.getInstance();
    Map<String, LogChannelFileWriterBuffer> fileWriterBuffers = getDummyFileWriterBuffers();
    loggingRegistry.setFileWriterBuffers( fileWriterBuffers );
    loggingRegistry.setChildrenMap( getDummyChildrenMap() );

    assertEquals( "dc8c1482-30ab-4d0f-b9f6-e4c32a627bf0",
      loggingRegistry.getLogChannelFileWriterBuffer( "dcffc35f-c74f-4e37-b463-97313998ea20" ).getLogChannelId() );

    //Switch the order of the writers
    fileWriterBuffers.remove( "7c1526bc-789e-4f5a-8d68-1f9c39488ceb" );
    fileWriterBuffers.put( "7c1526bc-789e-4f5a-8d68-1f9c39488ceb",
      new LogChannelFileWriterBuffer( "7c1526bc-789e-4f5a-8d68-1f9c39488ceb" ) );
    loggingRegistry.setFileWriterBuffers( fileWriterBuffers );

    //regardless of the order of the writers the correct the same should be selected
    assertEquals( "dc8c1482-30ab-4d0f-b9f6-e4c32a627bf0",
      loggingRegistry.getLogChannelFileWriterBuffer( "dcffc35f-c74f-4e37-b463-97313998ea20" ).getLogChannelId() );
}
/**
 * Removes any entry matching the given GRN from the user's last-opened list and
 * caps the result at {@code max - 1} entries (leaving room for one new entry).
 */
protected static List<LastOpenedDTO> filterForExistingIdAndCapAtMaximum(final LastOpenedForUserDTO loi, final GRN grn, final long max) {
    return loi.items()
            .stream()
            .filter(item -> !item.grn().equals(grn))
            .limit(max - 1)
            .toList();
}
@Test
public void testDontRemoveIfExistsInList() {
    var dashboard = grnRegistry.newGRN(GRNTypes.DASHBOARD, "1");
    var search = grnRegistry.newGRN(GRNTypes.SEARCH, "2");
    var dto = new LastOpenedForUserDTO("userId",
            List.of(new LastOpenedDTO(dashboard, DateTime.now(DateTimeZone.UTC))));

    // Filtering by an id that is NOT in the list must keep the existing entry.
    var result = StartPageService.filterForExistingIdAndCapAtMaximum(dto, search, MAX);

    assertThat(result.isEmpty()).isFalse();
}
/** Returns the {@link ResourceSkyline} held by this object. */
public final ResourceSkyline getResourceSkyline() {
    return resourceSkyline;
}
@Test
public final void testGetJobSize() {
    // JUnit's assertEquals signature is (expected, actual, delta); the original
    // passed the arguments reversed, which produces misleading failure messages
    // even though pass/fail behavior is the same for an equality check.
    Assert.assertEquals(1024.5, jobMetaData.getResourceSkyline().getJobInputDataSize(), 0);
}
/**
 * Generates a per-container Java security policy file and appends the security
 * flags to the container's launch command, unless the submitting user is
 * whitelisted (in which case the command is left untouched).
 *
 * @throws ContainerExecutionException if hadoop.tmp.dir is unset or the policy
 *         file cannot be created/written
 */
@Override
public void prepareContainer(ContainerRuntimeContext ctx)
    throws ContainerExecutionException {

    @SuppressWarnings("unchecked")
    List<String> localDirs = ctx.getExecutionAttribute(CONTAINER_LOCAL_DIRS);
    @SuppressWarnings("unchecked")
    Map<org.apache.hadoop.fs.Path, List<String>> resources = ctx.getExecutionAttribute(LOCALIZED_RESOURCES);
    @SuppressWarnings("unchecked")
    List<String> commands = ctx.getExecutionAttribute(CONTAINER_RUN_CMDS);
    Map<String, String> env = ctx.getContainer().getLaunchContext().getEnvironment();
    String username = ctx.getExecutionAttribute(USER);

    // Whitelisted users bypass the sandbox entirely.
    if(!isSandboxContainerWhitelisted(username, commands)) {
      String tmpDirBase = configuration.get("hadoop.tmp.dir");
      if (tmpDirBase == null) {
        throw new ContainerExecutionException("hadoop.tmp.dir not set!");
      }

      try {
        String containerID = ctx.getExecutionAttribute(CONTAINER_ID_STR);
        initializePolicyDir();

        // Group-specific policy fragments configured for this user's groups.
        List<String> groupPolicyFiles =
            getGroupPolicyFiles(configuration, ctx.getExecutionAttribute(USER));
        // One policy file per container, created with restrictive attributes.
        Path policyFilePath = Files.createFile(
            Paths.get(policyFileDir.toString(),
                containerID + "-" + NMContainerPolicyUtils.POLICY_FILE),
            POLICY_ATTR);

        try(OutputStream policyOutputStream =
                Files.newOutputStream(policyFilePath)) {

          // Track the file so it can be cleaned up when the container ends.
          containerPolicies.put(containerID, policyFilePath);
          NMContainerPolicyUtils.generatePolicyFile(policyOutputStream,
              localDirs, groupPolicyFiles, resources, configuration);
          // Mutates the launch command/env to enforce the policy at runtime.
          NMContainerPolicyUtils.appendSecurityFlags(
              commands, env, policyFilePath, sandboxMode);
        }
      } catch (IOException e) {
        throw new ContainerExecutionException(e);
      }
    }
}
@Test
public void testDisabledSandboxWithWhitelist() throws ContainerExecutionException {
    String[] originalCommand = { "java jar MyJob.jar" };
    List<String> commands = Arrays.asList(originalCommand);
    conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_GROUP, WHITELIST_GROUP);
    runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER);
    runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands);

    runtime.prepareContainer(runtimeContextBuilder.build());

    // A whitelisted user's launch command must pass through unmodified.
    Assert.assertTrue("Command should not be modified when user is "
        + "member of whitelisted group",
        originalCommand[0].equals(commands.get(0)));
}
/**
 * Lists services matching the given namespace/group/name pattern as a paged
 * JSON payload: {@code count} holds the total BEFORE paging, and
 * {@code serviceList} the views for the requested page only.
 *
 * @param ignoreEmptyService when true, services with zero instances are
 *        excluded from both the count and the page
 */
@Override
public Object pageListService(String namespaceId, String groupName, String serviceName, int pageNo, int pageSize,
        String instancePattern, boolean ignoreEmptyService) throws NacosException {
    ObjectNode result = JacksonUtils.createEmptyJsonNode();
    List<ServiceView> serviceViews = new LinkedList<>();
    Collection<Service> services = patternServices(namespaceId, groupName, serviceName);
    if (ignoreEmptyService) {
        // Drop services that currently have no registered instances.
        services = services.stream().filter(each -> 0 != serviceStorage.getData(each).ipCount())
                .collect(Collectors.toList());
    }
    result.put(FieldsConstants.COUNT, services.size());
    // doPage expects a zero-based page index.
    services = doPage(services, pageNo - 1, pageSize);
    for (Service each : services) {
        // Fall back to empty metadata so the protect-threshold check is safe.
        ServiceMetadata serviceMetadata = metadataManager.getServiceMetadata(each).orElseGet(ServiceMetadata::new);
        ServiceView serviceView = new ServiceView();
        serviceView.setName(each.getName());
        serviceView.setGroupName(each.getGroup());
        serviceView.setClusterCount(serviceStorage.getClusters(each).size());
        serviceView.setIpCount(serviceStorage.getData(each).ipCount());
        serviceView.setHealthyInstanceCount(countHealthyInstance(serviceStorage.getData(each)));
        serviceView.setTriggerFlag(isProtectThreshold(serviceView, serviceMetadata) ? "true" : "false");
        serviceViews.add(serviceView);
    }
    result.set(FieldsConstants.SERVICE_LIST, JacksonUtils.transferToJsonNode(serviceViews));
    return result;
}
@Test
void testPageListServiceForIgnoreEmptyService() throws NacosException {
    // Every service resolves to an empty ServiceInfo (zero instances), so with
    // ignoreEmptyService=true the reported count must be zero.
    ServiceInfo emptyServiceInfo = new ServiceInfo();
    Mockito.when(serviceStorage.getData(Mockito.any())).thenReturn(emptyServiceInfo);

    ObjectNode result = (ObjectNode) catalogServiceV2Impl.pageListService("A", "B", "C", 1, 10, null, true);

    assertEquals(0, result.get(FieldsConstants.COUNT).asInt());
}
@Override public UserDetails loadUserByUsername(String userId) throws UsernameNotFoundException { User user = null; try { user = this.identityService.createUserQuery() .userId(userId) .singleResult(); } catch (FlowableException ex) { // don't care } if (null == user) { throw new UsernameNotFoundException( String.format("user (%s) could not be found", userId)); } return createFlowableUser(user); }
@Test
public void testLoadingKnownUserWithSomePrivileges() {
    UserDetails fozzie = userDetailsService.loadUserByUsername("fozzie");

    // Basic account-state flags must all be "active".
    assertThat(fozzie).isNotNull();
    assertThat(fozzie.isCredentialsNonExpired()).as("credentialsNonExpired").isTrue();
    assertThat(fozzie.isAccountNonLocked()).as("accountNonLocked").isTrue();
    assertThat(fozzie.isAccountNonExpired()).as("accountNonExpired").isTrue();
    assertThat(fozzie.isEnabled()).as("enabled").isTrue();
    assertThat(fozzie.getUsername()).as("username").isEqualTo("fozzie");
    assertThat(fozzie.getPassword()).as("password").isEqualTo("fozzie");

    // Privileges map onto granted authorities.
    assertThat(fozzie.getAuthorities())
            .extracting(GrantedAuthority::getAuthority)
            .as("granted authorities")
            .containsExactly(
                    "start processes"
            );

    assertThat(fozzie).isInstanceOf(FlowableUserDetails.class);
    FlowableUserDetails fozzieFlowable = (FlowableUserDetails) fozzie;

    User user = fozzieFlowable.getUser();
    assertThat(user.getId()).isEqualTo("fozzie");
    assertThat(user.getFirstName()).isEqualTo("Fozzie");
    assertThat(user.getLastName()).isEqualTo("Bear");
    assertThat(user.getDisplayName()).isEqualTo("Fozzie Bear");
    assertThat(user.getEmail()).isEqualTo("fozzie@muppetshow.com");
    assertThat(user.getPassword()).isEqualTo("fozzie");

    // The exposed User must be a defensive copy/immutable view: setters must
    // not change the observable values.
    user.setId("test");
    user.setFirstName("test");
    user.setLastName("test");
    user.setDisplayName("test");
    user.setEmail("test");

    assertThat(user.getId()).isEqualTo("fozzie");
    assertThat(user.getFirstName()).isEqualTo("Fozzie");
    assertThat(user.getLastName()).isEqualTo("Bear");
    assertThat(user.getDisplayName()).isEqualTo("Fozzie Bear");
    assertThat(user.getEmail()).isEqualTo("fozzie@muppetshow.com");

    // Group membership is carried over as well.
    assertThat(fozzieFlowable.getGroups())
            .extracting(Group::getId, Group::getName, Group::getType)
            .as("Groups")
            .containsExactlyInAnyOrder(
                    tuple("sales", "Sales", "user")
            );
}
/**
 * Persists (creates or updates) the metadata carried by the registration DTO,
 * but only when the client explicitly requested metadata registration.
 */
@Override
protected void registerMetadata(final MetaDataRegisterDTO dto) {
    if (!dto.isRegisterMetaData()) {
        return;
    }
    final MetaDataService metaDataService = getMetaDataService();
    metaDataService.saveOrUpdateMetaData(metaDataService.findByPath(dto.getPath()), dto);
}
@Test
public void testRegisterMetadata() {
    final MetaDataDO existing = MetaDataDO.builder().build();
    when(metaDataService.findByPath(any())).thenReturn(existing);

    final MetaDataRegisterDTO dto = MetaDataRegisterDTO.builder().registerMetaData(true).build();
    shenyuClientRegisterWebSocketService.registerMetadata(dto);

    // The row found by path must be handed on for the save-or-update.
    verify(metaDataService).saveOrUpdateMetaData(existing, dto);
}
/**
 * Anonymous (unidentified-access) lookup of a versioned profile.
 * Only ACI service identifiers are accepted; the caller must present a valid
 * unidentified-access key for the target account.
 *
 * @throws io.grpc.StatusRuntimeException INVALID_ARGUMENT for non-ACI identifiers
 */
@Override
public Mono<GetVersionedProfileResponse> getVersionedProfile(final GetVersionedProfileAnonymousRequest request) {
    final ServiceIdentifier targetIdentifier =
            ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getRequest().getAccountIdentifier());

    // Versioned profiles are keyed by ACI; reject PNI (or any other) identifiers.
    if (targetIdentifier.identityType() != IdentityType.ACI) {
        throw Status.INVALID_ARGUMENT.withDescription("Expected ACI service identifier").asRuntimeException();
    }

    // Verify the unidentified-access key before revealing any profile data.
    return getTargetAccountAndValidateUnidentifiedAccess(targetIdentifier, request.getUnidentifiedAccessKey().toByteArray())
            .flatMap(targetAccount -> ProfileGrpcHelper.getVersionedProfile(targetAccount, profilesManager, request.getRequest().getVersion()));
}
@Test
void getVersionedProfilePniInvalidArgument() {
    // Versioned profiles are keyed by ACI; a PNI identifier must be rejected.
    final byte[] uak = TestRandomUtil.nextBytes(UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH);

    final GetVersionedProfileRequest profileRequest = GetVersionedProfileRequest.newBuilder()
            .setAccountIdentifier(ServiceIdentifier.newBuilder()
                    .setIdentityType(IdentityType.IDENTITY_TYPE_PNI)
                    .setUuid(ByteString.copyFrom(UUIDUtil.toBytes(UUID.randomUUID())))
                    .build())
            .setVersion("someVersion")
            .build();

    final GetVersionedProfileAnonymousRequest request = GetVersionedProfileAnonymousRequest.newBuilder()
            .setUnidentifiedAccessKey(ByteString.copyFrom(uak))
            .setRequest(profileRequest)
            .build();

    assertStatusException(Status.INVALID_ARGUMENT, () -> unauthenticatedServiceStub().getVersionedProfile(request));
}
/**
 * Boxed-Integer overload: unboxes and delegates to the primitive
 * {@code add(int)}. Throws {@link NullPointerException} when {@code value}
 * is {@code null}.
 */
@Override
public boolean add(final Integer value) {
    return add(value.intValue());
}
@Test
public void setsWithTheDifferentSizesAreNotEqual() {
    final IntHashSet other = new IntHashSet(100, -1);

    // set holds {1, 1001}; other holds only {1001} -- sizes differ.
    set.add(1);
    set.add(1001);
    other.add(1001);

    assertNotEquals(set, other);
}
/**
 * Builds an {@link OverlayData} from the theme colors of the given remote
 * activity, or {@code EMPTY} when accent theming is unsupported or the remote
 * package/theme cannot be resolved.
 * <p>
 * NOTE(review): the result is the shared {@code mCurrentOverlayData} instance,
 * mutated in place on every call -- callers must not retain it across calls;
 * confirm this is intentional.
 */
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
    if (!OS_SUPPORT_FOR_ACCENT) {
        return EMPTY;
    }

    try {
        // Resolve the remote activity's declared theme.
        final ActivityInfo activityInfo =
            mLocalContext
                .getPackageManager()
                .getActivityInfo(remoteApp, PackageManager.GET_META_DATA);
        // Build a context for the remote package so its resources can be read.
        final Context context =
            mLocalContext.createPackageContext(remoteApp.getPackageName(), CONTEXT_IGNORE_SECURITY);
        context.setTheme(activityInfo.getThemeResource());
        fetchRemoteColors(mCurrentOverlayData, context);

        Logger.d(
            "OverlyDataCreatorForAndroid",
            "For component %s we fetched %s",
            remoteApp,
            mCurrentOverlayData);

        return mCurrentOverlayData;
    } catch (Exception e) {
        // Any resolution failure degrades gracefully to "no overlay".
        Logger.w("OverlyDataCreatorForAndroid", e, "Failed to fetch colors for %s", remoteApp);
        return EMPTY;
    }
}
@Test public void testGetReferenceColorsHappyPath() { setupReturnedColors(R.style.HappyPathReferenceColors); final OverlayData overlayData = mUnderTest.createOverlayData(mComponentName); Assert.assertEquals(Color.parseColor("#ffcc9900"), overlayData.getPrimaryColor()); // notice: we also changing the alpha channel Assert.assertEquals(Color.parseColor("#ffcc9911"), overlayData.getPrimaryDarkColor()); Assert.assertEquals(Color.parseColor("#ffff0000"), overlayData.getPrimaryTextColor()); Assert.assertTrue(overlayData.isValid()); }
/**
 * Strips the given HTML tags from the content while keeping any text the tags
 * enclose (unwraps the element rather than deleting it with its body).
 *
 * @param content  the HTML text to process
 * @param tagNames names of the tags to unwrap (e.g. "img", "div")
 * @return the content with the listed tags removed but their inner text kept
 */
public static String unwrapHtmlTag(String content, String... tagNames) {
    return removeHtmlTag(content, false, tagNames);
}
@Test
public void unwrapHtmlTagTest() {
    // Unclosed tag
    String str = "pre<img src=\"xxx/dfdsfds/test.jpg\">";
    String result = HtmlUtil.unwrapHtmlTag(str, "img");
    assertEquals("pre", result);

    // Closed tag
    str = "pre<img>";
    result = HtmlUtil.unwrapHtmlTag(str, "img");
    assertEquals("pre", result);

    // Closed tag
    str = "pre<img src=\"xxx/dfdsfds/test.jpg\" />";
    result = HtmlUtil.unwrapHtmlTag(str, "img");
    assertEquals("pre", result);

    // Closed tag
    str = "pre<img />";
    result = HtmlUtil.unwrapHtmlTag(str, "img");
    assertEquals("pre", result);

    // Closed tag
    str = "pre<img/>";
    result = HtmlUtil.unwrapHtmlTag(str, "img");
    assertEquals("pre", result);

    // Tag containing content
    str = "pre<div class=\"test_div\">abc</div>";
    result = HtmlUtil.unwrapHtmlTag(str, "div");
    assertEquals("preabc", result);

    // With line breaks
    str = "pre<div class=\"test_div\">\r\n\t\tabc\r\n</div>";
    result = HtmlUtil.unwrapHtmlTag(str, "div");
    assertEquals("pre\r\n\t\tabc\r\n", result);
}
/**
 * Returns the IP version of this prefix (INET for IPv4, INET6 for IPv6),
 * as reported by its underlying address.
 */
public IpAddress.Version version() {
    return address.version();
}
@Test public void testVersion() { IpPrefix ipPrefix; // IPv4 ipPrefix = IpPrefix.valueOf("0.0.0.0/0"); assertThat(ipPrefix.version(), is(IpAddress.Version.INET)); // IPv6 ipPrefix = IpPrefix.valueOf("::/0"); assertThat(ipPrefix.version(), is(IpAddress.Version.INET6)); }
/**
 * Covariant override: delegates to the parent implementation and narrows the
 * return type to {@code MapSettings} so calls can be fluently chained.
 */
@Override
public MapSettings setProperty(String key, String value) {
    return (MapSettings) super.setProperty(key, value);
}
@Test
public void testSetNullDouble() {
    final Settings underTest = new MapSettings();

    // A null boxed Double must read back as an absent value, not a stored "null".
    underTest.setProperty("foo", (Double) null);

    assertThat(underTest.getDouble("foo")).isNull();
}