focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public Set<Pod> pods() {
    // Return an immutable snapshot so callers cannot mutate the backing store.
    Set<Pod> storedPods = kubevirtPodStore.pods();
    return ImmutableSet.copyOf(storedPods);
}
@Test
public void testGetPods() {
    createBasicPods();
    // Exactly one pod is expected after the basic fixture is created.
    int podCount = target.pods().size();
    assertEquals("Number of pods did not match", 1, podCount);
}
public ImmutableList<GlobalSetting> parse(final InputStream is) {
    // Deserialize the JSON stream into a list of GlobalSetting objects.
    final Class<GlobalSetting> settingType = GlobalSetting.class;
    return Jsons.toObjects(is, settingType);
}
@Test
public void should_parse_setting_file_with_file_root() {
    InputStream stream = getResourceAsStream("settings/fileroot-settings.json");
    ImmutableList<GlobalSetting> globalSettings = parser.parse(stream);
    // All assertions are on the first (and only expected) parsed setting.
    GlobalSetting firstSetting = globalSettings.get(0);
    String expectedInclude = join("src", "test", "resources", "settings", "fileroot.json");
    assertThat(firstSetting.includes().get(0), is(expectedInclude));
    assertThat(firstSetting.getContext(), is("/fileroot"));
    assertThat(firstSetting.getFileRoot(), is("src/test/resources"));
}
@Override public void applyToAllPartitions( TwoOutputApplyPartitionFunction<OUT1, OUT2> applyPartitionFunction) throws Exception { if (isKeyed) { for (Object key : keySet) { partitionedContext .getStateManager() .executeInKeyContext( () -> { try { applyPartitionFunction.apply( firstCollector, secondCollector, partitionedContext); } catch (Exception e) { throw new RuntimeException(e); } }, key); } } else { // non-keyed operator has only one partition. applyPartitionFunction.apply(firstCollector, secondCollector, partitionedContext); } }
// Verifies that applyToAllPartitions on a non-keyed (isKeyed=false, last-but-one ctor arg)
// two-output context invokes the partition function exactly once, forwards values to both
// collectors, and never touches the key-context callback (cf stays incomplete).
@Test void testApplyToAllPartitions() throws Exception { AtomicInteger counter = new AtomicInteger(0); List<Integer> collectedFromFirstOutput = new ArrayList<>(); List<Long> collectedFromSecondOutput = new ArrayList<>(); TestingTimestampCollector<Integer> firstCollector = TestingTimestampCollector.<Integer>builder() .setCollectConsumer(collectedFromFirstOutput::add) .build(); TestingTimestampCollector<Long> secondCollector = TestingTimestampCollector.<Long>builder() .setCollectConsumer(collectedFromSecondOutput::add) .build(); CompletableFuture<Void> cf = new CompletableFuture<>(); StreamingRuntimeContext operatorRuntimeContext = ContextTestUtils.createStreamingRuntimeContext(); DefaultRuntimeContext runtimeContext = new DefaultRuntimeContext( operatorRuntimeContext.getJobInfo().getJobName(), operatorRuntimeContext.getJobType(), 1, 2, "mock-task", operatorRuntimeContext.getMetricGroup()); DefaultTwoOutputNonPartitionedContext<Integer, Long> nonPartitionedContext = new DefaultTwoOutputNonPartitionedContext<>( runtimeContext, new DefaultPartitionedContext( runtimeContext, Optional::empty, (key) -> cf.complete(null), UnsupportedProcessingTimeManager.INSTANCE, ContextTestUtils.createStreamingRuntimeContext(), new MockOperatorStateStore()), firstCollector, secondCollector, false, null); nonPartitionedContext.applyToAllPartitions( (firstOutput, secondOutput, ctx) -> { counter.incrementAndGet(); firstOutput.collect(10); secondOutput.collect(20L); }); assertThat(counter.get()).isEqualTo(1); assertThat(cf).isNotCompleted(); assertThat(collectedFromFirstOutput).containsExactly(10); assertThat(collectedFromSecondOutput).containsExactly(20L); }
@SuppressWarnings("unchecked")
@Override
public OUT extract(Object in) {
    // Reflectively read the element at index `fieldId` from the array instance;
    // the unchecked cast mirrors the declared output type OUT.
    Object element = Array.get(in, fieldId);
    return (OUT) element;
}
@Test
void testIntArray() {
    // Extracting each index of the int[] must yield that position's value.
    for (int i = 0; i < this.testIntArray.length; i++) {
        // Use Integer.valueOf instead of the deprecated new Integer(...) constructor.
        assertThat(new FieldFromArray<Integer>(i).extract(testIntArray))
                .isEqualTo(Integer.valueOf(testIntArray[i]));
    }
}
// Verifies the ECDSA signature over a signed PIP structure.
// Layout assumed (from the indices used here): signedPip[1] = PIP content sequence,
// signedPip[2] = signature sequence whose [0] is the algorithm OID and [1] the (r, s) pair;
// the content's element [2] is the key-set version (ksv).
// Throws BsnkException when the content cannot be re-encoded or when the pip's ksv does not
// match the configured bsnkUKsv; returns false (after logging) when signature verification
// itself fails, true on success.
public boolean verifySignedPip(ASN1Sequence signedPip) throws BsnkException { ASN1Sequence signedPipContent = (ASN1Sequence) signedPip.getObjectAt(1); ASN1Sequence pipSignatureSequence = (ASN1Sequence) signedPip.getObjectAt(2); ASN1Sequence pipSignature = (ASN1Sequence) pipSignatureSequence.getObjectAt(1); byte[] pipBytes; try { pipBytes = signedPipContent.getEncoded(); } catch (IOException ex) { throw new BsnkException("SignedPipIOFault", "Failed to get byte[] from pip or pip signature.", ex); } BigInteger pipKsv = ((ASN1Integer) signedPipContent.getObjectAt(2)).getValue(); if (!pipKsv.equals(bsnkUKsv)) { throw new BsnkException("SignedpipKsvMismatch", String.format("Signedpip ksv mismatch. U: '%s'. Pip: '%s'", bsnkUKsv, pipKsv), null); } String oid = ((ASN1ObjectIdentifier) pipSignatureSequence.getObjectAt(0)).getId(); BigInteger r = ((ASN1Integer) pipSignature.getObjectAt(0)).getValue(); BigInteger s = ((ASN1Integer) pipSignature.getObjectAt(1)).getValue(); SignatureEcdsa signature = (SignatureEcdsa) SignatureEcdsa.from(oid, r, s); try { signature.verify(bsnkUPubkey, pipBytes); return true; } catch (CryptoException ex) { logger.error(String.format("Exception during pip verification: '%s", ex.getMessage())); return false; } }
// Negative path: inject (via reflection) a public key that cannot match the pip's
// signature, then expect verifySignedPip to return false rather than throw.
// The base64 literal below decodes to a structurally valid but non-matching EC point.
@Test public void verifySignedPipFailTest() throws IOException, BsnkException, InvalidKeySpecException, NoSuchAlgorithmException, NoSuchProviderException { String invalidUBase64 = "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"; ECPublicKey invalidU = (ECPublicKey) PublicKeyParser.decodeKey(Base64.getDecoder().decode(invalidUBase64)); ReflectionTestUtils.setField(bsnkUtil, "bsnkUPubkey", invalidU); assertFalse(bsnkUtil.verifySignedPip(signedPip)); }
/*
 * Applies one ZMQ socket option to this options holder. Returns true when the option was
 * accepted, false for a few options whose value type was not recognised (e.g. allocator /
 * selector chooser given an unsupported object), and throws IllegalArgumentException for
 * out-of-range values or unknown option codes. Several security-related options
 * (PLAIN/CURVE/GSSAPI) also switch the `mechanism` field as a side effect.
 *
 * NOTE(review): two error messages appear inconsistent with the checks they guard —
 *  - ZMQ_IDENTITY rejects val == null || val.length > 255, yet the message says
 *    "must not be null or less than 255" (presumably should read "longer than 255");
 *  - ZMQ_HEARTBEAT_TTL accepts 0..6553 deciseconds (i.e. 0..655300 ms), yet the message
 *    says "[0..655399]". Confirm intended wording before changing the strings.
 */
@SuppressWarnings("deprecation") public boolean setSocketOpt(int option, Object optval) { final ValueReference<Boolean> result = new ValueReference<>(false); switch (option) { case ZMQ.ZMQ_SNDHWM: sendHwm = (Integer) optval; if (sendHwm < 0) { throw new IllegalArgumentException("sendHwm " + optval); } return true; case ZMQ.ZMQ_RCVHWM: recvHwm = (Integer) optval; if (recvHwm < 0) { throw new IllegalArgumentException("recvHwm " + optval); } return true; case ZMQ.ZMQ_AFFINITY: affinity = (Long) optval; return true; case ZMQ.ZMQ_IDENTITY: byte[] val = parseBytes(option, optval); if (val == null || val.length > 255) { throw new IllegalArgumentException("identity must not be null or less than 255 " + optval); } identity = Arrays.copyOf(val, val.length); identitySize = (short) identity.length; return true; case ZMQ.ZMQ_RATE: rate = (Integer) optval; return true; case ZMQ.ZMQ_RECOVERY_IVL: recoveryIvl = (Integer) optval; return true; case ZMQ.ZMQ_SNDBUF: sndbuf = (Integer) optval; return true; case ZMQ.ZMQ_RCVBUF: rcvbuf = (Integer) optval; return true; case ZMQ.ZMQ_TOS: tos = (Integer) optval; return true; case ZMQ.ZMQ_LINGER: linger = (Integer) optval; return true; case ZMQ.ZMQ_RECONNECT_IVL: reconnectIvl = (Integer) optval; if (reconnectIvl < -1) { throw new IllegalArgumentException("reconnectIvl " + optval); } return true; case ZMQ.ZMQ_RECONNECT_IVL_MAX: reconnectIvlMax = (Integer) optval; if (reconnectIvlMax < 0) { throw new IllegalArgumentException("reconnectIvlMax " + optval); } return true; case ZMQ.ZMQ_BACKLOG: backlog = (Integer) optval; return true; case ZMQ.ZMQ_MAXMSGSIZE: maxMsgSize = (Long) optval; return true; case ZMQ.ZMQ_MULTICAST_HOPS: multicastHops = (Integer) optval; return true; case ZMQ.ZMQ_RCVTIMEO: recvTimeout = (Integer) optval; return true; case ZMQ.ZMQ_SNDTIMEO: sendTimeout = (Integer) optval; return true; /* Deprecated in favor of ZMQ_IPV6 */ case ZMQ.ZMQ_IPV4ONLY: return setSocketOpt(ZMQ.ZMQ_IPV6, !parseBoolean(option, optval)); /* To replace 
the somewhat surprising IPV4ONLY */ case ZMQ.ZMQ_IPV6: ipv6 = parseBoolean(option, optval); return true; case ZMQ.ZMQ_SOCKS_PROXY: socksProxyAddress = parseString(option, optval); return true; case ZMQ.ZMQ_TCP_KEEPALIVE: tcpKeepAlive = ((Number) optval).intValue(); if (tcpKeepAlive != -1 && tcpKeepAlive != 0 && tcpKeepAlive != 1) { throw new IllegalArgumentException("tcpKeepAlive only accepts one of -1,0,1 " + optval); } return true; case ZMQ.ZMQ_TCP_KEEPALIVE_CNT: this.tcpKeepAliveCnt = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_TCP_KEEPALIVE_IDLE: this.tcpKeepAliveIdle = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_TCP_KEEPALIVE_INTVL: this.tcpKeepAliveIntvl = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_IMMEDIATE: immediate = parseBoolean(option, optval); return true; case ZMQ.ZMQ_DELAY_ATTACH_ON_CONNECT: immediate = !parseBoolean(option, optval); return true; case ZMQ.ZMQ_TCP_ACCEPT_FILTER: String filterStr = parseString(option, optval); if (filterStr == null) { tcpAcceptFilters.clear(); } else if (filterStr.isEmpty() || filterStr.length() > 255) { throw new IllegalArgumentException("tcp_accept_filter " + optval); } else { TcpAddressMask filter = new TcpAddressMask(filterStr, ipv6); tcpAcceptFilters.add(filter); } return true; case ZMQ.ZMQ_PLAIN_SERVER: asServer = parseBoolean(option, optval); mechanism = (asServer ? 
Mechanisms.PLAIN : Mechanisms.NULL); return true; case ZMQ.ZMQ_PLAIN_USERNAME: if (optval == null) { mechanism = Mechanisms.NULL; asServer = false; return true; } plainUsername = parseString(option, optval); asServer = false; mechanism = Mechanisms.PLAIN; return true; case ZMQ.ZMQ_PLAIN_PASSWORD: if (optval == null) { mechanism = Mechanisms.NULL; asServer = false; return true; } plainPassword = parseString(option, optval); asServer = false; mechanism = Mechanisms.PLAIN; return true; case ZMQ.ZMQ_ZAP_DOMAIN: String domain = parseString(option, optval); if (domain != null && domain.length() < 256) { zapDomain = domain; return true; } throw new IllegalArgumentException("zap domain length shall be < 256 : " + optval); case ZMQ.ZMQ_CURVE_SERVER: asServer = parseBoolean(option, optval); mechanism = (asServer ? Mechanisms.CURVE : Mechanisms.NULL); return true; case ZMQ.ZMQ_CURVE_PUBLICKEY: curvePublicKey = setCurveKey(option, optval, result); return result.get(); case ZMQ.ZMQ_CURVE_SECRETKEY: curveSecretKey = setCurveKey(option, optval, result); return result.get(); case ZMQ.ZMQ_CURVE_SERVERKEY: curveServerKey = setCurveKey(option, optval, result); if (curveServerKey == null) { asServer = false; } return result.get(); case ZMQ.ZMQ_CONFLATE: conflate = parseBoolean(option, optval); return true; case ZMQ.ZMQ_GSSAPI_SERVER: asServer = parseBoolean(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_PRINCIPAL: gssPrincipal = parseString(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL: gssServicePrincipal = parseString(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_PLAINTEXT: gssPlaintext = parseBoolean(option, optval); return true; case ZMQ.ZMQ_HANDSHAKE_IVL: handshakeIvl = (Integer) optval; if (handshakeIvl < 0) { throw new IllegalArgumentException("handshakeIvl only accept positive values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_IVL: heartbeatInterval 
= (Integer) optval; if (heartbeatInterval < 0) { throw new IllegalArgumentException("heartbeatInterval only accept positive values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_TIMEOUT: heartbeatTimeout = (Integer) optval; if (heartbeatTimeout < 0) { throw new IllegalArgumentException("heartbeatTimeout only accept positive values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_TTL: Integer value = (Integer) optval; // Convert this to deciseconds from milliseconds value /= 100; if (value >= 0 && value <= 6553) { heartbeatTtl = value; } else { throw new IllegalArgumentException("heartbeatTtl is out of range [0..655399]" + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_CONTEXT: heartbeatContext = (byte[]) optval; if (heartbeatContext == null) { throw new IllegalArgumentException("heartbeatContext cannot be null"); } return true; case ZMQ.ZMQ_DECODER: decoder = checkCustomCodec(optval, IDecoder.class); rawSocket = true; // failure throws ZError.InstantiationException // if that line is reached, everything is fine return true; case ZMQ.ZMQ_ENCODER: encoder = checkCustomCodec(optval, IEncoder.class); rawSocket = true; // failure throws ZError.InstantiationException // if that line is reached, everything is fine return true; case ZMQ.ZMQ_MSG_ALLOCATOR: if (optval instanceof String) { try { allocator = allocator(Class.forName((String) optval)); return true; } catch (ClassNotFoundException e) { throw new IllegalArgumentException(e); } } else if (optval instanceof Class) { allocator = allocator((Class<?>) optval); return true; } else if (optval instanceof MsgAllocator) { allocator = (MsgAllocator) optval; return true; } return false; case ZMQ.ZMQ_MSG_ALLOCATION_HEAP_THRESHOLD: Integer allocationHeapThreshold = (Integer) optval; allocator = new MsgAllocatorThreshold(allocationHeapThreshold); return true; case ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER: if (optval instanceof String) { try { selectorChooser = chooser(Class.forName((String) optval)); return true; } catch 
(ClassNotFoundException e) { throw new IllegalArgumentException(e); } } else if (optval instanceof Class) { selectorChooser = chooser((Class<?>) optval); return true; } else if (optval instanceof SelectorProviderChooser) { selectorChooser = (SelectorProviderChooser) optval; return true; } return false; case ZMQ.ZMQ_HELLO_MSG: if (optval == null) { helloMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { helloMsg = null; } else { helloMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_DISCONNECT_MSG: if (optval == null) { disconnectMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { disconnectMsg = null; } else { disconnectMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_HICCUP_MSG: if (optval == null) { hiccupMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { hiccupMsg = null; } else { hiccupMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_AS_TYPE: this.asType = (Integer) optval; return true; case ZMQ.ZMQ_SELFADDR_PROPERTY_NAME: this.selfAddressPropertyName = parseString(option, optval); return true; default: throw new IllegalArgumentException("Unknown Option " + option); } }
@Test(expected = IllegalArgumentException.class)
public void testHeartbeatTimeoutUnderflow() {
    // A negative heartbeat timeout must be rejected.
    final int invalidTimeout = -1;
    options.setSocketOpt(ZMQ.ZMQ_HEARTBEAT_TIMEOUT, invalidTimeout);
}
@Override
public void processElement(final StreamRecord<T> element) throws Exception {
    final T event = element.getValue();
    // Use Long.MIN_VALUE as the "no previous timestamp" sentinel.
    long previousTimestamp = Long.MIN_VALUE;
    if (element.hasTimestamp()) {
        previousTimestamp = element.getTimestamp();
    }
    // Re-stamp the record with the assigner's timestamp before emitting it.
    final long newTimestamp = timestampAssigner.extractTimestamp(event, previousTimestamp);
    element.setTimestamp(newTimestamp);
    output.collect(element);
    // Let the watermark generator observe the event after it has been forwarded.
    watermarkGenerator.onEvent(event, newTimestamp, wmOutput);
}
// In batch mode the harness must re-stamp records via the assigner but emit no
// periodic watermarks (pollNextLegacyWatermark stays null) even after the
// auto-watermark interval elapses. The processing-time advances are interleaved
// with element processing, so statement order here is significant.
@Test void periodicWatermarksBatchMode() throws Exception { OneInputStreamOperatorTestHarness<Long, Long> testHarness = createBatchHarness( WatermarkStrategy.forGenerator((ctx) -> new PeriodicWatermarkGenerator()) .withTimestampAssigner((ctx) -> new LongExtractor())); testHarness.processElement(new StreamRecord<>(2L, 1)); testHarness.setProcessingTime(AUTO_WATERMARK_INTERVAL); assertThat(pollNextStreamRecord(testHarness)).is(matching(streamRecord(2L, 2L))); assertThat(pollNextLegacyWatermark(testHarness)).isNull(); testHarness.processElement(new StreamRecord<>(4L, 1)); testHarness.setProcessingTime(AUTO_WATERMARK_INTERVAL * 2); assertThat(pollNextStreamRecord(testHarness)).is(matching(streamRecord(4L, 4L))); assertThat(pollNextLegacyWatermark(testHarness)).isNull(); }
public static String toIpString(InetSocketAddress address) {
    // Null-safe: a null socket address maps to null.
    if (address == null) {
        return null;
    }
    InetAddress inetAddress = address.getAddress();
    if (inetAddress != null) {
        // Resolved address: prefer the literal IP.
        return inetAddress.getHostAddress();
    }
    // Unresolved address: fall back to the host name it was created with.
    return address.getHostName();
}
// NOTE(review): this test body is empty, so toIpString() is never actually exercised
// and the test gives false confidence. Add assertions covering: null input -> null,
// a resolved InetSocketAddress -> getHostAddress(), and an unresolved address
// (InetSocketAddress.createUnresolved) -> getHostName(); or delete the test.
@Test public void toIpString() throws Exception { }
// Builds the resource-group manager spec from DB records. Groups are assembled
// root-to-leaf with an explicit work list: a group is only built once all of its
// children (tracked in subGroupIdsToBuild) have been built and attached to it;
// otherwise it is pushed back behind its children. Selector specs are then mapped
// from DAO rows onto the id-template map produced during the traversal.
// NOTE(review): the computeIfAbsent lambda calls builder.getParentId().get()
// unconditionally — this relies on root ids being pre-populated into
// resourceGroupIdTemplateMap by populateFromDbHelper; verify that invariant holds.
@Override public synchronized ManagerSpec getManagerSpec() { Set<Long> rootGroupIds = new HashSet<>(); Map<Long, ResourceGroupSpec> resourceGroupSpecMap = new HashMap<>(); Map<Long, ResourceGroupIdTemplate> resourceGroupIdTemplateMap = new HashMap<>(); Map<Long, ResourceGroupSpecBuilder> recordMap = new HashMap<>(); Map<Long, Set<Long>> subGroupIdsToBuild = new HashMap<>(); populateFromDbHelper(recordMap, rootGroupIds, resourceGroupIdTemplateMap, subGroupIdsToBuild); // Build up resource group specs from root to leaf for (LinkedList<Long> queue = new LinkedList<>(rootGroupIds); !queue.isEmpty(); ) { Long id = queue.pollFirst(); resourceGroupIdTemplateMap.computeIfAbsent(id, k -> { ResourceGroupSpecBuilder builder = recordMap.get(k); return ResourceGroupIdTemplate.forSubGroupNamed( resourceGroupIdTemplateMap.get(builder.getParentId().get()), builder.getNameTemplate().toString()); }); Set<Long> childrenToBuild = subGroupIdsToBuild.getOrDefault(id, ImmutableSet.of()); // Add to resource group specs if no more child resource groups are left to build if (childrenToBuild.isEmpty()) { ResourceGroupSpecBuilder builder = recordMap.get(id); ResourceGroupSpec resourceGroupSpec = builder.build(); resourceGroupSpecMap.put(id, resourceGroupSpec); // Add this resource group spec to parent subgroups and remove id from subgroup ids to build builder.getParentId().ifPresent(parentId -> { recordMap.get(parentId).addSubGroup(resourceGroupSpec); subGroupIdsToBuild.get(parentId).remove(id); }); } else { // Add this group back to queue since it still has subgroups to build queue.addFirst(id); // Add this group's subgroups to the queue so that when this id is dequeued again childrenToBuild will be empty queue.addAll(0, childrenToBuild); } } // Specs are built from db records, validate and return manager spec List<ResourceGroupSpec> rootGroups = rootGroupIds.stream().map(resourceGroupSpecMap::get).collect(toList()); List<SelectorSpec> selectors = resourceGroupsDao.getSelectors(environment) 
.stream() .map(selectorRecord -> new SelectorSpec( selectorRecord.getUserRegex(), selectorRecord.getSourceRegex(), selectorRecord.getQueryType(), selectorRecord.getClientTags(), selectorRecord.getSelectorResourceEstimate(), selectorRecord.getClientInfoRegex(), selectorRecord.getSchema(), selectorRecord.getPrincipalRegex(), resourceGroupIdTemplateMap.get(selectorRecord.getResourceGroupId())) ).collect(toList()); ManagerSpec managerSpec = new ManagerSpec(rootGroups, selectors, getCpuQuotaPeriodFromDb()); return managerSpec; }
// Builds a 3-level group tree in H2 (global -> {subTo1-1, subTo1-2} -> ...; the last
// long argument of insertResourceGroup is the parent id) and verifies the provider
// reconstructs the hierarchy: one root, two children under it, and two grandchildren
// under subTo1-1, with subTo2-2 at index 1 of that grandchild list.
@Test public void testSubgroups() { H2DaoProvider daoProvider = setup("test_dup_roots"); H2ResourceGroupsDao dao = daoProvider.get(); dao.createResourceGroupsGlobalPropertiesTable(); dao.createResourceGroupsTable(); dao.createSelectorsTable(); dao.insertResourceGroup(1, "global", "1MB", 1000, 100, 100, null, null, null, null, null, null, null, null, 0, null, ENVIRONMENT); dao.insertResourceGroup(2, "subTo1-1", "1MB", 1000, 100, 100, null, null, null, null, null, null, null, null, 0, 1L, ENVIRONMENT); dao.insertResourceGroup(3, "subTo1-2", "1MB", 1000, 100, 100, null, null, null, null, null, null, null, null, 0, 1L, ENVIRONMENT); dao.insertResourceGroup(4, "subTo2-1", "1MB", 1000, 100, 100, null, null, null, null, null, null, null, null, 0, 2L, ENVIRONMENT); dao.insertResourceGroup(5, "subTo2-2", "1MB", 1000, 100, 100, null, null, null, null, null, null, null, null, 0, 2L, ENVIRONMENT); dao.insertResourceGroup(6, "subTo3", "1MB", 1000, 100, 100, null, null, null, null, null, null, null, null, 0, 3L, ENVIRONMENT); DbManagerSpecProvider dbManagerSpecProvider = new DbManagerSpecProvider(daoProvider.get(), ENVIRONMENT, new ReloadingResourceGroupConfig()); ManagerSpec managerSpec = dbManagerSpecProvider.getManagerSpec(); assertEquals(managerSpec.getRootGroups().size(), 1); assertEquals(managerSpec.getRootGroups().get(0).getName().toString(), "global"); assertEquals(managerSpec.getRootGroups().get(0).getSubGroups().size(), 2); assertEquals(managerSpec.getRootGroups().get(0).getSubGroups().get(0).getName().toString(), "subTo1-1"); assertEquals(managerSpec.getRootGroups().get(0).getSubGroups().get(0).getSubGroups().size(), 2); assertEquals(managerSpec.getRootGroups().get(0).getSubGroups().get(0).getSubGroups().get(1).getName().toString(), "subTo2-2"); }
@VisibleForTesting
public BlockPlacementPolicy getBlockPlacementPolicy() {
    // Expose the placement policy used for contiguous (non-striped) blocks.
    final BlockPlacementPolicy contiguousPolicy = placementPolicies.getPolicy(CONTIGUOUS);
    return contiguousPolicy;
}
// useDelHint should honor the deletion hint only when the hint's storage type is
// among the excess types: DEFAULT -> true, then swapping excess to SSD -> false.
@Test public void testUseDelHint() { DatanodeStorageInfo delHint = new DatanodeStorageInfo( DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id")); List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint); List<StorageType> excessTypes = new ArrayList<>(); BlockPlacementPolicyDefault policyDefault = (BlockPlacementPolicyDefault) bm.getBlockPlacementPolicy(); excessTypes.add(StorageType.DEFAULT); Assert.assertTrue(policyDefault.useDelHint(delHint, null, moreThan1Racks, null, excessTypes)); excessTypes.remove(0); excessTypes.add(StorageType.SSD); Assert.assertFalse(policyDefault.useDelHint(delHint, null, moreThan1Racks, null, excessTypes)); }
@Override
public byte[] serialize(final String topic, final TimestampedKeyAndJoinSide<K> data) {
    // Encode the join side as a single marker byte: 1 = left, 0 = right.
    final byte joinSideByte = data.isLeftSide() ? (byte) 1 : (byte) 0;
    final byte[] keyBytes = keySerializer.serialize(topic, data.getKey());
    final byte[] timestampBytes = timestampSerializer.serialize(topic, data.getTimestamp());
    // Wire layout: [timestamp][side byte][key].
    final ByteBuffer buffer = ByteBuffer.allocate(timestampBytes.length + 1 + keyBytes.length);
    buffer.put(timestampBytes);
    buffer.put(joinSideByte);
    buffer.put(keyBytes);
    return buffer.array();
}
@Test
public void shouldSerializeKeyWithJoinSideAsTrue() {
    final TimestampedKeyAndJoinSide<String> original =
            TimestampedKeyAndJoinSide.makeLeft("some-string", 10);
    final byte[] serialized = STRING_SERDE.serializer().serialize(TOPIC, original);
    assertThat(serialized, is(notNullValue()));
    // Round-tripping through the deserializer must restore an equal value.
    final TimestampedKeyAndJoinSide<String> roundTripped =
            STRING_SERDE.deserializer().deserialize(TOPIC, serialized);
    assertThat(roundTripped, is(original));
}
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    // Resolve any registered alias to the plugin's fully-qualified class name first.
    final String resolvedName = aliases.getOrDefault(name, name);
    final PluginClassLoader delegate = pluginClassLoader(resolvedName);
    if (delegate == null) {
        // Not a known plugin class: use the normal parent-delegation path.
        return super.loadClass(resolvedName, resolve);
    }
    log.trace("Retrieving loaded class '{}' from '{}'", resolvedName, delegate);
    return delegate.loadClass(resolvedName, resolve);
}
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void testEmptyLoadClass() throws ClassNotFoundException {
    // With no plugins registered, loading must delegate to the parent loader.
    when(parent.loadClass(ARBITRARY, false)).thenReturn((Class) ARBITRARY_CLASS);
    Class<?> loaded = classLoader.loadClass(ARBITRARY, false);
    assertSame(ARBITRARY_CLASS, loaded);
}
public void isNotIn(@Nullable Iterable<?> iterable) {
    checkNotNull(iterable);
    // Fail if the actual value appears anywhere in the given iterable.
    boolean present = Iterables.contains(iterable, actual);
    if (present) {
        failWithActual("expected not to be any of", iterable);
    }
}
@Test
public void isNotInEmpty() {
    // Nothing is contained in an empty list, so the assertion must pass.
    ImmutableList<String> empty = ImmutableList.of();
    assertThat("b").isNotIn(empty);
}
@PutMapping("/{id}")
public ShenyuAdminResult updateSelector(@PathVariable("id") @Valid @Existed(provider = SelectorMapper.class, message = "selector is not existed") final String id, @Valid @RequestBody final SelectorDTO selectorDTO) {
    // Bind the path id onto the payload so the service layer treats this as an update.
    selectorDTO.setId(id);
    final Integer updateCount = selectorService.createOrUpdate(selectorDTO);
    // Report the number of rows affected alongside the standard success message.
    return ShenyuAdminResult.success(ShenyuResultMessage.UPDATE_SUCCESS, updateCount);
}
// End-to-end PUT /selector/{id}: wires mock mappers into the static SpringBeanUtils
// lookup so the @Existed validators pass, stubs the service to report one updated row,
// and asserts a 200 with the UPDATE_SUCCESS message in the JSON body.
@Test public void updateSelector() throws Exception { SelectorDTO selectorDTO = SelectorDTO.builder() .id("123") .name("test123") .continued(true) .type(1) .loged(true) .enabled(true) .matchRestful(false) .pluginId("2") .sort(1) .namespaceId(SYS_DEFAULT_NAMESPACE_ID) .build(); SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class)); when(SpringBeanUtils.getInstance().getBean(SelectorMapper.class)).thenReturn(selectorMapper); when(selectorMapper.existed(selectorDTO.getId())).thenReturn(true); when(SpringBeanUtils.getInstance().getBean(PluginMapper.class)).thenReturn(pluginMapper); when(pluginMapper.existed(selectorDTO.getPluginId())).thenReturn(true); when(SpringBeanUtils.getInstance().getBean(NamespaceMapper.class)).thenReturn(namespaceMapper); when(namespaceMapper.existed(SYS_DEFAULT_NAMESPACE_ID)).thenReturn(true); given(this.selectorService.createOrUpdate(selectorDTO)).willReturn(1); this.mockMvc.perform(MockMvcRequestBuilders.put("/selector/{id}", "123") .contentType(MediaType.APPLICATION_JSON) .content(GsonUtils.getInstance().toJson(selectorDTO))) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS))) .andReturn(); }
/**
 * Renders the user-supplied Slack message template against the event's backlog model.
 *
 * @param ctx      notification context used to fetch the message backlog
 * @param config   notification config supplying type and time zone for the model
 * @param template the custom message template to render
 * @return the rendered message
 * @throws PermanentEventNotificationException when the template cannot be transformed
 */
String buildCustomMessage(EventNotificationContext ctx, SlackEventNotificationConfig config, String template) throws PermanentEventNotificationException {
    final List<MessageSummary> backlog = getMessageBacklog(ctx, config);
    Map<String, Object> model = getCustomMessageModel(ctx, config.type(), backlog, config.timeZone());
    try {
        LOG.debug("customMessage: template = {} model = {}", template, model);
        return templateEngine.transform(template, model);
    } catch (Exception e) {
        String error = "Invalid Custom Message template.";
        // Use parameterized logging, and propagate `e` itself as the cause:
        // the previous code passed e.getCause(), which may be null and dropped
        // the original stack trace.
        LOG.error("{} [{}]", error, e.toString());
        throw new PermanentEventNotificationException(error + e, e);
    }
}
@Test
public void buildCustomMessage() throws PermanentEventNotificationException {
    // An unknown placeholder renders to an empty string rather than failing.
    String unresolved = slackEventNotification.buildCustomMessage(
            eventNotificationContext, slackEventNotificationConfig, "${thisDoesnotExist}");
    assertThat(unresolved).isEmpty();
    // A plain-text template passes through and stays non-empty.
    String rendered = slackEventNotification.buildCustomMessage(
            eventNotificationContext, slackEventNotificationConfig, "test");
    assertThat(rendered).isNotEmpty();
}
@Override
public List<String> getServerList() {
    // Prefer the explicitly configured server list; fall back to the
    // endpoint-discovered servers only when none was configured.
    if (serverList.isEmpty()) {
        return serversFromEndpoint;
    }
    return serverList;
}
// ENDPOINT_CONTEXT_PATH ("aaa") must win over CONTEXT_PATH ("bbb") when building the
// address-server URL: the mocked HTTP call is stubbed only for
// http://127.0.0.1:8080/aaa/ccc, so constructing the manager succeeds only if the
// endpoint context path took priority; the stubbed result yields one server.
@Test void testConstructEndpointContextPathPriority() throws Exception { clientProperties.setProperty(PropertyKeyConst.ENDPOINT_CONTEXT_PATH, "aaa"); clientProperties.setProperty(PropertyKeyConst.CONTEXT_PATH, "bbb"); clientProperties.setProperty(PropertyKeyConst.ENDPOINT_CLUSTER_NAME, "ccc"); clientProperties.setProperty(PropertyKeyConst.ENDPOINT, "127.0.0.1"); Mockito.reset(nacosRestTemplate); Mockito.when(nacosRestTemplate.get(eq("http://127.0.0.1:8080/aaa/ccc"), any(), any(), any())) .thenReturn(httpRestResult); serverListManager = new ServerListManager(clientProperties, "test"); List<String> serverList = serverListManager.getServerList(); assertEquals(1, serverList.size()); assertEquals("127.0.0.1:8848", serverList.get(0)); }
public boolean isSubscribed(String groupName, String serviceName) {
    // Subscriptions are keyed by the grouped service name.
    final String subscriptionId = NamingUtils.getGroupedName(serviceName, groupName);
    return selectorManager.isSubscribed(subscriptionId);
}
@Test
void testIsSubscribed() {
    EventListener listener = Mockito.mock(EventListener.class);
    NamingSelector selector =
            NamingSelectorFactory.newClusterSelector(Collections.singletonList(CLUSTER_STR_CASE));
    // Before any listener is registered, the service is not subscribed.
    assertFalse(instancesChangeNotifier.isSubscribed(GROUP_CASE, SERVICE_NAME_CASE));
    NamingSelectorWrapper wrapper = new NamingSelectorWrapper(
            SERVICE_NAME_CASE, GROUP_CASE, CLUSTER_STR_CASE, selector, listener);
    instancesChangeNotifier.registerListener(GROUP_CASE, SERVICE_NAME_CASE, wrapper);
    // Registering a listener flips the subscription state.
    assertTrue(instancesChangeNotifier.isSubscribed(GROUP_CASE, SERVICE_NAME_CASE));
}
public static WindowBytesStoreSupplier persistentTimestampedWindowStore(final String name, final Duration retentionPeriod, final Duration windowSize, final boolean retainDuplicates) throws IllegalArgumentException {
    // The timestamped variant delegates to the generic factory with the
    // `timestamped` flag enabled.
    final boolean timestamped = true;
    return persistentWindowStore(name, retentionPeriod, windowSize, retainDuplicates, timestamped);
}
@Test
public void shouldThrowIfIPersistentTimestampedWindowStoreRetentionPeriodIsNegative() {
    // A negative retention period must be rejected at construction time.
    final Exception e = assertThrows(
            IllegalArgumentException.class,
            () -> Stores.persistentTimestampedWindowStore("anyName", ofMillis(-1L), ZERO, false));
    assertEquals("retentionPeriod cannot be negative", e.getMessage());
}
// Static factory for a KafkaIO.Read with all defaults populated: empty topic and
// partition lists, the standard consumer factory/config, unbounded record count,
// no finalize-commit, no dynamic read, processing-time timestamps, a 2s poll
// timeout, and redistribution disabled.
public static <K, V> Read<K, V> read() { return new AutoValue_KafkaIO_Read.Builder<K, V>() .setTopics(new ArrayList<>()) .setTopicPartitions(new ArrayList<>()) .setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN) .setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES) .setMaxNumRecords(Long.MAX_VALUE) .setCommitOffsetsInFinalizeEnabled(false) .setDynamicRead(false) .setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime()) .setConsumerPollingTimeout(2L) .setRedistributed(false) .setAllowDuplicates(false) .setRedistributeNumKeys(0) .build(); }
// Resumption test: read 5 elements from a source with more partitions than elements
// (so some partitions are empty at checkpoint time), snapshot the checkpoint mark,
// then resume against a LATEST-reset consumer — forcing the reader to seek explicitly
// on previously-empty partitions — and verify every remaining element arrives
// (order-insensitive, since partitions are read unevenly).
@Test public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception { // Similar to testUnboundedSourceCheckpointMark(), but verifies that source resumes // properly from empty partitions, without missing messages added since checkpoint. // Initialize consumer with fewer elements than number of partitions so that some are empty. int initialNumElements = 5; UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source = mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn()) .makeSource() .split(1, PipelineOptionsFactory.create()) .get(0); UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null); for (int l = 0; l < initialNumElements; ++l) { advanceOnce(reader, l > 0); } // Checkpoint and restart, and confirm that the source continues correctly. KafkaCheckpointMark mark = CoderUtils.clone( source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark()); // Create another source with MockConsumer with OffsetResetStrategy.LATEST. This insures that // the reader need to explicitly need to seek to first offset for partitions that were empty. int numElements = 100; // all the 20 partitions will have elements List<String> topics = ImmutableList.of("topic_a", "topic_b"); source = KafkaIO.<Integer, Long>read() .withBootstrapServers("none") .withTopics(topics) .withConsumerFactoryFn( new ConsumerFactoryFn(topics, 10, numElements, OffsetResetStrategy.LATEST)) .withKeyDeserializer(IntegerDeserializer.class) .withValueDeserializer(LongDeserializer.class) .withMaxNumRecords(numElements) .withTimestampFn(new ValueAsTimestampFn()) .makeSource() .split(1, PipelineOptionsFactory.create()) .get(0); reader = source.createReader(null, mark); // Verify in any order. As the partitions are unevenly read, the returned records are not in a // simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin oder. 
List<Long> expected = new ArrayList<>(); List<Long> actual = new ArrayList<>(); for (long i = initialNumElements; i < numElements; i++) { advanceOnce(reader, i > initialNumElements); expected.add(i); actual.add(reader.getCurrent().getKV().getValue()); } assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray())); }
// Returns the shared singleton metadata store instance.
public static RepositoryMetadataStore getInstance() { return repositoryMetadataStore; }
@Test
public void shouldPopulateDataCorrectly() throws Exception {
    RepositoryMetadataStore store = RepositoryMetadataStore.getInstance();
    PackageConfigurations configuration = new PackageConfigurations();
    store.addMetadataFor("plugin-id", configuration);
    // The exact instance registered must come back on lookup.
    assertThat(store.getMetadata("plugin-id"), is(configuration));
}
@Override
public <T> @Nullable Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
    // Dynamic message types are rejected up front.
    checkForDynamicType(typeDescriptor);
    @SuppressWarnings("unchecked")
    Class<Message> messageClass = (Class<Message>) typeDescriptor.getRawType();
    return ProtoSchemaTranslator.getSchema(messageClass);
}
@Test
public void testPrimitiveSchema() {
    // The proto-derived schema for Primitive must match the reference schema.
    ProtoMessageSchema provider = new ProtoMessageSchema();
    Schema actual = provider.schemaFor(TypeDescriptor.of(Primitive.class));
    assertEquals(PRIMITIVE_SCHEMA, actual);
}
public static boolean isBlank(String value) {
    // Treats null and the empty string as blank; note that whitespace-only
    // strings are NOT considered blank by this method.
    if (value == null) {
        return true;
    }
    return value.isEmpty();
}
@Test void isBlankInputNotNullOutputFalse() { // Arrange final String value = "AAAAAAAA"; // Act final boolean retval = Util.isBlank(value); // Assert result assertThat(retval).isEqualTo(false); }
boolean contains(Point p) {
    // The rectangle is centered at (coordinateX, coordinateY); compute half extents once.
    var halfWidth = this.width / 2;
    var halfHeight = this.height / 2;
    boolean withinX = p.coordinateX >= this.coordinateX - halfWidth
            && p.coordinateX <= this.coordinateX + halfWidth;
    boolean withinY = p.coordinateY >= this.coordinateY - halfHeight
            && p.coordinateY <= this.coordinateY + halfHeight;
    return withinX && withinY;
}
@Test void containsTest() { var r = new Rect(10, 10, 20, 20); var b1 = new Bubble(2, 2, 1, 1); var b2 = new Bubble(30, 30, 2, 1); //r contains b1 and not b2 assertTrue(r.contains(b1)); assertFalse(r.contains(b2)); }
@Override
public void close() {
    finished();
    // Emit the duration summary at the configured level; the debug branch is
    // gated to avoid formatting this object when debug logging is off.
    if (logAtInfo) {
        log.info("{}", this);
    } else if (log.isDebugEnabled()) {
        log.debug("{}", this);
    }
}
@Test(expected = NullPointerException.class)
public void testDurationInfoCreationWithNullMsg() {
    // Constructing with a null message must fail fast with an NPE.
    final DurationInfo durationInfo = new DurationInfo(log, null);
    durationInfo.close();
}
/**
 * Sends a synchronous acknowledge-commit request to every node that currently has a
 * share session, returning a future that completes once all per-node results are in
 * (or immediately when there is nothing to acknowledge).
 *
 * @param acknowledgementsMap acknowledgements to commit, keyed by partition
 * @param deadlineMs absolute time by which the commit must complete
 * @return future yielding the acknowledgements handled per partition
 */
public CompletableFuture<Map<TopicIdPartition, Acknowledgements>> commitSync(
        final Map<TopicIdPartition, Acknowledgements> acknowledgementsMap,
        final long deadlineMs) {
    final AtomicInteger resultCount = new AtomicInteger();
    final CompletableFuture<Map<TopicIdPartition, Acknowledgements>> future = new CompletableFuture<>();
    final ResultHandler resultHandler = new ResultHandler(resultCount, Optional.of(future));

    final Cluster cluster = metadata.fetch();
    sessionHandlers.forEach((nodeId, sessionHandler) -> {
        Node node = cluster.nodeById(nodeId);
        if (node != null) {
            acknowledgeRequestStates.putIfAbsent(nodeId, new Pair<>(null, null));
            // Ensure there is no commitSync()/close() request already present as they are blocking calls
            // and only one request can be active at a time.
            if (acknowledgeRequestStates.get(nodeId).getSyncRequest() != null
                    && !acknowledgeRequestStates.get(nodeId).getSyncRequest().isEmpty()) {
                log.error("Attempt to call commitSync() when there is an existing sync request for node {}", node.id());
                future.completeExceptionally(
                        new IllegalStateException("Attempt to call commitSync() when there is an existing sync request for node : " + node.id()));
            } else {
                // Collect only the acknowledgements for partitions that belong to this node's session.
                Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = new HashMap<>();
                for (TopicIdPartition tip : sessionHandler.sessionPartitions()) {
                    Acknowledgements acknowledgements = acknowledgementsMap.get(tip);
                    if (acknowledgements != null) {
                        acknowledgementsMapForNode.put(tip, acknowledgements);

                        metricsManager.recordAcknowledgementSent(acknowledgements.size());
                        log.debug("Added sync acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
                        resultCount.incrementAndGet();
                    }
                }

                // There can only be one commitSync()/close() happening at a time. So per node, there will be one acknowledge request state representing commitSync() and close().
                acknowledgeRequestStates.get(nodeId).setSyncRequest(new AcknowledgeRequestState(logContext,
                        ShareConsumeRequestManager.class.getSimpleName() + ":1",
                        deadlineMs,
                        retryBackoffMs,
                        retryBackoffMaxMs,
                        sessionHandler,
                        nodeId,
                        acknowledgementsMapForNode,
                        this::handleShareAcknowledgeSuccess,
                        this::handleShareAcknowledgeFailure,
                        resultHandler,
                        AcknowledgeRequestType.COMMIT_SYNC
                ));
            }
        }
    });

    // Complete right away when no node contributed any acknowledgements.
    resultHandler.completeIfEmpty();
    return future;
}
@Test
public void testCommitSync() {
    buildRequestManager();
    assignFromSubscribed(Collections.singleton(tp0));

    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(shareConsumeRequestManager.hasCompletedFetches());

    client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(shareConsumeRequestManager.hasCompletedFetches());

    // Build a mixed set of acknowledgements and commit them synchronously.
    Acknowledgements acknowledgements = Acknowledgements.empty();
    acknowledgements.add(1L, AcknowledgeType.ACCEPT);
    acknowledgements.add(2L, AcknowledgeType.ACCEPT);
    acknowledgements.add(3L, AcknowledgeType.REJECT);

    shareConsumeRequestManager.commitSync(Collections.singletonMap(tip0, acknowledgements), time.milliseconds() + 2000);
    // Exactly one acknowledge request should be sent for the single node.
    assertEquals(1, shareConsumeRequestManager.sendAcknowledgements());

    client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(shareConsumeRequestManager.hasCompletedFetches());
    // The completed callback must report exactly the acknowledgements that were committed.
    assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0));
    completedAcknowledgements.clear();
}
/**
 * FEEL includes(range, point): true when the point lies within the range, honoring
 * open/closed boundaries. Null parameters and non-comparable points yield error results.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) {
    if ( point == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
    }
    if ( range == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
    }
    try {
        // Inside when strictly between the endpoints, or sitting exactly on a CLOSED boundary.
        boolean result = (range.getLowEndPoint().compareTo(point) < 0 && range.getHighEndPoint().compareTo(point) > 0)
                || (range.getLowEndPoint().compareTo(point) == 0 && range.getLowBoundary() == RangeBoundary.CLOSED)
                || (range.getHighEndPoint().compareTo(point) == 0 && range.getHighBoundary() == RangeBoundary.CLOSED);
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
    }
}
@Test
void invokeParamsCantBeCompared() {
    // Passing a numeric range as the "point" of a string range is not comparable,
    // so the function must surface an InvalidParametersEvent instead of throwing.
    FunctionTestUtil.assertResultError( includesFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, 1, 2, Range.RangeBoundary.CLOSED ) ),
            InvalidParametersEvent.class );
}
/**
 * Fails unless the subject has exactly the given size.
 *
 * @param expectedSize the expected size; must be non-negative
 */
public final void hasSize(int expectedSize) {
    checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize);
    // The subject itself must be non-null before its size can be measured.
    int actualSize = size(checkNotNull(actual));
    check("size()").that(actualSize).isEqualTo(expectedSize);
}
@Test
@SuppressWarnings({"TruthIterableIsEmpty", "IsEmptyTruth"})
public void hasSizeZero() {
    // An empty collection must report size 0.
    assertThat(ImmutableList.of()).hasSize(0);
}
/**
 * Deprecated overload that delegates to the {@code Named} variant, using a
 * freshly generated processor name.
 */
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
                    final String... stateStoreNames) {
    process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
}
@SuppressWarnings("deprecation")
@Test
public void shouldProcessWithOldProcessorAndState() {
    final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String());
    final StreamsBuilder builder = new StreamsBuilder();
    final String input = "input";

    builder.addStateStore(Stores.keyValueStoreBuilder(
        Stores.inMemoryKeyValueStore("sum"),
        Serdes.String(),
        Serdes.Integer()
    ));

    // Old-style Processor that accumulates the lengths of values per key into "sum".
    builder.stream(input, consumed)
        .process(() -> new org.apache.kafka.streams.processor.Processor<String, String>() {
            private KeyValueStore<String, Integer> sumStore;

            @Override
            public void init(final ProcessorContext context) {
                this.sumStore = context.getStateStore("sum");
            }

            @Override
            public void process(final String key, final String value) {
                final Integer counter = sumStore.get(key);
                if (counter == null) {
                    sumStore.putIfAbsent(key, value.length());
                } else {
                    if (value == null) {
                        sumStore.delete(key);
                    } else {
                        sumStore.put(key, counter + value.length());
                    }
                }
            }

            @Override
            public void close() { }
        }, Named.as("p"), "sum");

    // The processor must appear in the topology, named "p" and connected to the "sum" store.
    final String topologyDescription = builder.build().describe().toString();
    assertThat(
        topologyDescription,
        equalTo("Topologies:\n" +
            " Sub-topology: 0\n" +
            " Source: KSTREAM-SOURCE-0000000000 (topics: [input])\n" +
            " --> p\n" +
            " Processor: p (stores: [sum])\n" +
            " --> none\n" +
            " <-- KSTREAM-SOURCE-0000000000\n\n")
    );

    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> inputTopic = driver.createInputTopic(
            input,
            new StringSerializer(),
            new StringSerializer()
        );
        inputTopic.pipeInput("A", "0", 5L);
        inputTopic.pipeInput("B", "00", 100L);
        inputTopic.pipeInput("C", "000", 0L);
        inputTopic.pipeInput("D", "0000", 0L);
        inputTopic.pipeInput("A", "00000", 10L);
        inputTopic.pipeInput("A", "000000", 8L);

        // Expected sums of value lengths: A = 1+5+6, B = 2, C = 3, D = 4.
        final KeyValueStore<String, Integer> sumStore = driver.getKeyValueStore("sum");
        assertEquals(12, sumStore.get("A").intValue());
        assertEquals(2, sumStore.get("B").intValue());
        assertEquals(3, sumStore.get("C").intValue());
        assertEquals(4, sumStore.get("D").intValue());
    }
}
/** Creates an uninitialized {@link Read} transform with the connector's default settings. */
public static Read read() {
    return new AutoValue_MongoDbIO_Read.Builder()
        .setQueryFn(FindQuery.create())
        .setMaxConnectionIdleTime(60000)
        .setNumSplits(0)
        .setBucketAuto(false)
        .setSslEnabled(false)
        .setSslInvalidHostNameAllowed(false)
        .setIgnoreSSLCertificate(false)
        .build();
}
@Test
public void testReadWithFilter() {
    // Filtering on a single scientist should yield exactly the 100 matching documents.
    MongoDbIO.Read read = MongoDbIO.read()
        .withUri("mongodb://localhost:" + port)
        .withDatabase(DATABASE_NAME)
        .withCollection(COLLECTION_NAME)
        .withQueryFn(FindQuery.create().withFilters(Filters.eq("scientist", "Einstein")));
    PCollection<Document> output = pipeline.apply(read);

    PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(100L);

    pipeline.run();
}
/**
 * Fails unless the map contains the given key/value entry. When it does not, picks the
 * most informative failure message: a value mismatch for a present key, near-miss keys or
 * values whose toString matches, or other keys mapped to the expected value.
 */
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
    Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
    checkNotNull(actual);
    if (!actual.entrySet().contains(entry)) {
        List<@Nullable Object> keyList = singletonList(key);
        List<@Nullable Object> valueList = singletonList(value);
        if (actual.containsKey(key)) {
            // Key present but mapped to a different value: report as an equality failure on get(key).
            Object actualValue = actual.get(key);
            /*
             * In the case of a null expected or actual value, clarify that the key *is* present and
             * *is* expected to be present. That is, get() isn't returning null to indicate that the key
             * is missing, and the user isn't making an assertion that the key is missing.
             */
            StandardSubjectBuilder check = check("get(%s)", key);
            if (value == null || actualValue == null) {
                check = check.withMessage("key is present but with a different value");
            }
            // See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
            check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
        } else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
            // No equal key, but some key stringifies the same — call out the type confusion.
            failWithoutActual(
                fact("expected to contain entry", entry),
                fact("an instance of", objectToTypeName(entry)),
                simpleFact("but did not"),
                fact(
                    "though it did contain keys",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (actual.containsValue(value)) {
            // Expected value exists but under other keys — list those keys.
            Set<@Nullable Object> keys = new LinkedHashSet<>();
            for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
                if (Objects.equal(actualEntry.getValue(), value)) {
                    keys.add(actualEntry.getKey());
                }
            }
            failWithoutActual(
                fact("expected to contain entry", entry),
                simpleFact("but did not"),
                fact("though it did contain keys with that value", keys),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else if (hasMatchingToStringPair(actual.values(), valueList)) {
            // No equal value, but some value stringifies the same — call out the type confusion.
            failWithoutActual(
                fact("expected to contain entry", entry),
                fact("an instance of", objectToTypeName(entry)),
                simpleFact("but did not"),
                fact(
                    "though it did contain values",
                    countDuplicatesAndAddTypeInfo(
                        retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
                fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
        } else {
            // Plain miss: no near matches to report.
            failWithActual("expected to contain entry", entry);
        }
    }
}
@Test
public void containsExactly_bothExactAndToStringKeyMatches_showsExactKeyMatch() {
    // 1 (int) and 1L (long) stringify identically; the failure must prefer the exact key match.
    ImmutableMap<Number, String> actual = ImmutableMap.of(1, "actual int", 1L, "actual long");
    expectFailureWhenTestingThat(actual).containsEntry(1L, "expected long");
    // should show the exact key match, 1="actual int", not the toString key match, 1L="actual long"
    assertFailureKeys("value of", "expected", "but was", "map was");
    assertFailureValue("value of", "map.get(1)");
    assertFailureValue("expected", "expected long");
    assertFailureValue("but was", "actual long");
}
/** TINYINT {@code >} operator: true when {@code left} is strictly greater than {@code right}. */
@ScalarOperator(GREATER_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThan(@SqlType(StandardTypes.TINYINT) long left, @SqlType(StandardTypes.TINYINT) long right)
{
    return Long.compare(left, right) > 0;
}
@Test
public void testGreaterThan() {
    // Strict comparison: true only when the left operand exceeds the right one.
    assertFunction("TINYINT'37' > TINYINT'17'", BOOLEAN, true);
    assertFunction("TINYINT'37' > TINYINT'37'", BOOLEAN, false);
    assertFunction("TINYINT'17' > TINYINT'37'", BOOLEAN, false);
    assertFunction("TINYINT'17' > TINYINT'17'", BOOLEAN, false);
}
/**
 * Parses/binds the given SQL statement and creates the matching backend handler.
 * Empty statements are skipped outright; DistSQL statements bypass the SQL bind engine.
 *
 * @throws SQLException when downstream handler creation fails
 */
public static ProxyBackendHandler newInstance(final DatabaseType databaseType, final String sql, final SQLStatement sqlStatement,
                                              final ConnectionSession connectionSession, final HintValueContext hintValueContext) throws SQLException {
    if (sqlStatement instanceof EmptyStatement) {
        return new SkipBackendHandler(sqlStatement);
    }
    // DistSQL carries its own context; regular SQL is bound against current database metadata.
    SQLStatementContext sqlStatementContext = sqlStatement instanceof DistSQLStatement ? new DistSQLStatementContext((DistSQLStatement) sqlStatement)
            : new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(),
                    connectionSession.getCurrentDatabaseName(), hintValueContext).bind(sqlStatement, Collections.emptyList());
    QueryContext queryContext = new QueryContext(sqlStatementContext, sql, Collections.emptyList(), hintValueContext,
            connectionSession.getConnectionContext(), ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData());
    // The session remembers the query context for the duration of this statement.
    connectionSession.setQueryContext(queryContext);
    return newInstance(databaseType, queryContext, connectionSession, false);
}
@Test
void assertNewInstanceWithShow() throws SQLException {
    // Each SHOW variant must be routed to a unicast database backend handler.
    String sql = "SHOW VARIABLES LIKE '%x%'";
    SQLStatement sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession);
    ProxyBackendHandler actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext());
    assertThat(actual, instanceOf(UnicastDatabaseBackendHandler.class));
    sql = "SHOW VARIABLES WHERE Variable_name ='language'";
    sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession);
    actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext());
    assertThat(actual, instanceOf(UnicastDatabaseBackendHandler.class));
    sql = "SHOW CHARACTER SET";
    sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession);
    actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext());
    assertThat(actual, instanceOf(UnicastDatabaseBackendHandler.class));
    sql = "SHOW COLLATION";
    sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession);
    actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext());
    assertThat(actual, instanceOf(UnicastDatabaseBackendHandler.class));
}
/**
 * Runs a whitespace-insensitive git blame on the given file and converts the result to
 * scanner {@link BlameLine}s. Returns an empty list when there is no blame result
 * (probably a symlink) or when any line lacks blame info (e.g. uncommitted content).
 */
public List<BlameLine> blame(Git git, String filename) {
    BlameResult blameResult;
    try {
        blameResult = git.blame()
            // Equivalent to -w command line option
            .setTextComparator(RawTextComparator.WS_IGNORE_ALL)
            .setFilePath(filename).call();
    } catch (Exception e) {
        throw new IllegalStateException("Unable to blame file " + filename, e);
    }
    List<BlameLine> lines = new ArrayList<>();
    if (blameResult == null) {
        LOG.debug("Unable to blame file {}. It is probably a symlink.", filename);
        return emptyList();
    }
    for (int i = 0; i < blameResult.getResultContents().size(); i++) {
        // Abort at the first line with missing blame info rather than returning partial data.
        if (blameResult.getSourceAuthor(i) == null || blameResult.getSourceCommit(i) == null) {
            LOG.debug("Unable to blame file {}. No blame info at line {}. Is file committed? [Author: {} Source commit: {}]", filename, i + 1,
                blameResult.getSourceAuthor(i), blameResult.getSourceCommit(i));
            return emptyList();
        }
        lines.add(new BlameLine()
            .date(blameResult.getSourceCommitter(i).getWhen())
            .revision(blameResult.getSourceCommit(i).getName())
            .author(blameResult.getSourceAuthor(i).getEmailAddress()));
    }
    return lines;
}
@Test public void symlink_doesnt_fail() throws IOException { assumeTrue(!System2.INSTANCE.isOsWindows()); String relativePath2 = "src/main/java/org/dummy/Dummy2.java"; // Create symlink Files.createSymbolicLink(baseDir.resolve(relativePath2), baseDir.resolve(DUMMY_JAVA)); try (Git git = loadRepository(baseDir)) { jGitBlameCommand.blame(git, DUMMY_JAVA); jGitBlameCommand.blame(git, relativePath2); } }
@Override @Transactional public boolean updateAfterApproval(Long userId, Integer userType, String clientId, Map<String, Boolean> requestedScopes) { // 如果 requestedScopes 为空,说明没有要求,则返回 true 通过 if (CollUtil.isEmpty(requestedScopes)) { return true; } // 更新批准的信息 boolean success = false; // 需要至少有一个同意 LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT); for (Map.Entry<String, Boolean> entry : requestedScopes.entrySet()) { if (entry.getValue()) { success = true; } saveApprove(userId, userType, clientId, entry.getKey(), entry.getValue(), expireTime); } return success; }
@Test public void testUpdateAfterApproval_none() { // 准备参数 Long userId = randomLongId(); Integer userType = randomEle(UserTypeEnum.values()).getValue(); String clientId = randomString(); // 调用 boolean success = oauth2ApproveService.updateAfterApproval(userId, userType, clientId, null); // 断言 assertTrue(success); List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList(); assertEquals(0, result.size()); }
/**
 * Returns how long (in ms) the task at the head of the queue has been waiting,
 * or 0 when the queue is empty, the head is not a request task, or the delta
 * comes out negative.
 */
public long headSlowTimeMills(BlockingQueue<Runnable> q) {
    final Runnable head = q.peek();
    if (head == null) {
        return 0;
    }
    final RequestTask headTask = BrokerFastFailure.castRunnable(head);
    if (headTask == null) {
        return 0;
    }
    // Clamp negative deltas (clock adjustments) to zero.
    return Math.max(0, this.messageStore.now() - headTask.getCreateTimestamp());
}
@Test
public void testHeadSlowTimeMills() throws Exception {
    BrokerController brokerController = new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
    brokerController.initialize();
    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();

    //create task is not instance of FutureTaskExt;
    Runnable runnable = new Runnable() {
        @Override
        public void run() {
        }
    };
    RequestTask requestTask = new RequestTask(runnable, null, null);
    // the requestTask is not the head of queue;
    queue.add(new FutureTaskExt<>(requestTask, null));

    long headSlowTimeMills = 100;
    TimeUnit.MILLISECONDS.sleep(headSlowTimeMills);
    // The head task was created ~100ms ago, so the reported age must be at least that.
    assertThat(brokerController.headSlowTimeMills(queue)).isGreaterThanOrEqualTo(headSlowTimeMills);
}
/**
 * Trims leading and trailing whitespace from the given string; a {@code null}
 * input is passed through as {@code null}.
 */
@Udf
public String trim(
    @UdfParameter(
        description = "The string to trim") final String input) {
  return input == null ? null : input.trim();
}
@Test
public void shouldReturnNullForNullInput() {
    // A null input must be passed through as null rather than throwing.
    assertThat(udf.trim(null), is(nullValue()));
}
/**
 * Replays a dynamic-plugin load from metadata. The loader is registered first, then
 * installed; install failures are recorded on the loader instead of being rethrown,
 * and the plugin name is always remembered so the replay stays consistent.
 */
public void replayLoadDynamicPlugin(PluginInfo info) throws IOException, UserException {
    DynamicPluginLoader pluginLoader = new DynamicPluginLoader(Config.plugin_dir, info);
    try {
        // should add to "plugins" first before loading.
        PluginLoader checkLoader = plugins[info.getTypeId()].putIfAbsent(info.getName(), pluginLoader);
        if (checkLoader != null) {
            throw new UserException("plugin " + info.getName() + " has already been installed.");
        }

        pluginLoader.setStatus(PluginStatus.INSTALLING);
        // install plugin
        pluginLoader.reload();
        pluginLoader.setStatus(PluginStatus.INSTALLED);
    } catch (IOException | UserException e) {
        // Replay must not abort: mark the loader as errored and carry on.
        pluginLoader.setStatus(PluginStatus.ERROR, e.getMessage());
        LOG.warn("fail to load plugin", e);
    } finally {
        // this is a replay process, so whether it is successful or not, add it's name.
        addDynamicPluginNameIfAbsent(info.getName());
    }
}
@Test
public void testLoadPluginFail() {
    try {
        PluginMgr pluginMgr = GlobalStateMgr.getCurrentState().getPluginMgr();
        PluginInfo info = new PluginInfo();
        info.name = "plugin-name";
        info.type = PluginType.AUDIT;
        info.description = "plugin description";
        info.version = DigitalVersion.CURRENT_STARROCKS_VERSION;
        info.javaVersion = DigitalVersion.JDK_1_8_0;
        info.className = "hello.jar";
        info.soName = "hello.so";
        info.source = "test";
        info.properties.put("md5sum", "cf0c536b8f2a0a0690b44d783d019e90");
        // Replay swallows install failures internally, so no exception is expected here.
        pluginMgr.replayLoadDynamicPlugin(info);
    } catch (IOException | UserException e) {
        e.printStackTrace();
        assert false;
    }
}
/**
 * Convenience overload: parses {@code url} and delegates to the {@code HttpUrl}-based
 * {@code allSubPaths}, which enumerates the URL's path and all of its ancestor paths.
 */
public static ImmutableSet<HttpUrl> allSubPaths(String url) {
    return allSubPaths(HttpUrl.parse(url));
}
@Test
public void allSubPaths_whenMultipleSubPathsNoTrailingSlash_returnsExpectedUrl() {
    // Every ancestor path, and the leaf itself, comes back slash-terminated.
    ImmutableSet<HttpUrl> subPaths = allSubPaths("http://localhost/a/b/c");
    assertThat(subPaths)
        .containsExactly(
            HttpUrl.parse("http://localhost/"),
            HttpUrl.parse("http://localhost/a/"),
            HttpUrl.parse("http://localhost/a/b/"),
            HttpUrl.parse("http://localhost/a/b/c/"));
}
/**
 * Resolves the configured fallback method and runs the primary function through it;
 * when no usable fallback is found (blank name or no matching signature) the primary
 * function is invoked directly.
 */
public Object execute(ProceedingJoinPoint proceedingJoinPoint, Method method, String fallbackMethodValue,
                      CheckedSupplier<Object> primaryFunction) throws Throwable {
    final String resolvedFallbackName =
        spelResolver.resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue);

    FallbackMethod resolvedFallback = null;
    if (StringUtils.hasLength(resolvedFallbackName)) {
        try {
            resolvedFallback = FallbackMethod.create(resolvedFallbackName, method,
                proceedingJoinPoint.getArgs(), proceedingJoinPoint.getTarget(), proceedingJoinPoint.getThis());
        } catch (NoSuchMethodException ex) {
            // Fall through to the undecorated primary call.
            logger.warn("No fallback method match found", ex);
        }
    }

    return resolvedFallback == null
        ? primaryFunction.get()
        : fallbackDecorators.decorate(resolvedFallback, primaryFunction).get();
}
@Test
public void testPrimaryMethodExecutionWithFallbackWithIncorrectSignature() throws Throwable {
    Method method = this.getClass().getMethod("getName", String.class);
    final CheckedSupplier<Object> primaryFunction = () -> getName("Name");
    final String fallbackMethodValue = "getNameInvalidFallback";
    when(proceedingJoinPoint.getArgs()).thenReturn(new Object[]{});
    when(proceedingJoinPoint.getTarget()).thenReturn(this);
    when(spelResolver.resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue)).thenReturn(fallbackMethodValue);
    when(fallbackDecorators.decorate(any(),eq(primaryFunction))).thenReturn(primaryFunction);

    final Object result = fallbackExecutor.execute(proceedingJoinPoint, method, fallbackMethodValue, primaryFunction);

    // The fallback name does not match a usable signature, so the primary result is
    // returned and no decoration takes place.
    assertThat(result).isEqualTo("Name");
    verify(spelResolver, times(1)).resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue);
    verify(fallbackDecorators, never()).decorate(any(),any());
}
/**
 * Dispatches a launcher event for a single container: launch/relaunch/recover submit
 * work to the launcher pool; cleanup, signal, pause and resume act on the tracked
 * {@code ContainerLaunch} if one is running.
 */
@Override
public void handle(ContainersLauncherEvent event) {
    // TODO: ContainersLauncher launches containers one by one!!
    Container container = event.getContainer();
    ContainerId containerId = container.getContainerId();
    switch (event.getType()) {
      case LAUNCH_CONTAINER:
        Application app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());

        ContainerLaunch launch = new ContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(launch);
        running.put(containerId, launch);
        break;
      case RELAUNCH_CONTAINER:
        app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());

        ContainerRelaunch relaunch = new ContainerRelaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(relaunch);
        running.put(containerId, relaunch);
        break;
      case RECOVER_CONTAINER:
        app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());
        launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(launch);
        running.put(containerId, launch);
        break;
      case RECOVER_PAUSED_CONTAINER:
        app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());
        launch = new RecoverPausedContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager);
        // NOTE(review): unlike the other launch/recover paths this one is not added to
        // "running" — presumably intentional for paused containers; confirm.
        containerLauncher.submit(launch);
        break;
      case CLEANUP_CONTAINER:
        cleanup(event, containerId, true);
        break;
      case CLEANUP_CONTAINER_FOR_REINIT:
        cleanup(event, containerId, false);
        break;
      case SIGNAL_CONTAINER:
        SignalContainersLauncherEvent signalEvent =
            (SignalContainersLauncherEvent) event;
        ContainerLaunch runningContainer = running.get(containerId);
        if (runningContainer == null) {
          // Container not launched. So nothing needs to be done.
          LOG.info("Container " + containerId + " not running, nothing to signal.");
          return;
        }
        try {
          runningContainer.signalContainer(signalEvent.getCommand());
        } catch (IOException e) {
          LOG.warn("Got exception while signaling container " + containerId
              + " with command " + signalEvent.getCommand());
        }
        break;
      case PAUSE_CONTAINER:
        ContainerLaunch launchedContainer = running.get(containerId);
        if (launchedContainer == null) {
          // Container not launched. So nothing needs to be done.
          return;
        }

        // Pause the container
        try {
          launchedContainer.pauseContainer();
        } catch (Exception e) {
          LOG.info("Got exception while pausing container: " +
              StringUtils.stringifyException(e));
        }
        break;
      case RESUME_CONTAINER:
        ContainerLaunch launchCont = running.get(containerId);
        if (launchCont == null) {
          // Container not launched. So nothing needs to be done.
          return;
        }

        // Resume the container.
        try {
          launchCont.resumeContainer();
        } catch (Exception e) {
          LOG.info("Got exception while resuming container: " +
              StringUtils.stringifyException(e));
        }
        break;
    }
}
@Test
public void testResumeContainerEvent()
    throws IllegalArgumentException, IllegalAccessException, IOException {
    // A RESUME_CONTAINER event for a tracked container must resume it exactly once
    // and keep it in the running map.
    spy.running.clear();
    spy.running.put(containerId, containerLaunch);
    when(event.getType())
        .thenReturn(ContainersLauncherEventType.RESUME_CONTAINER);
    doNothing().when(containerLaunch).resumeContainer();
    spy.handle(event);
    assertEquals(1, spy.running.size());
    Mockito.verify(containerLaunch, Mockito.times(1)).resumeContainer();
}
/**
 * Lazily creates the runner on first use and returns the cached instance thereafter.
 *
 * <p>NOTE(review): the lazy initialization is not synchronized — presumably this
 * supplier is only used from a single thread; confirm before sharing it across threads.
 */
@Override
public Runner get() {
    if (runner == null) {
        runner = createRunner();
    }
    return runner;
}
@Test
void should_return_the_same_runner_on_subsequent_calls() {
    // Two consecutive get() calls must yield the same cached runner instance.
    assertThat(runnerSupplier.get(), is(equalTo(runnerSupplier.get())));
}
/**
 * Converts an {@code Extension} to its persistent {@code ExtensionStore} form: coerces
 * the instance to the registered scheme type, validates it against the scheme's JSON
 * schema, and serializes the validated JSON tree.
 *
 * @throws SchemaViolationException when schema validation fails
 * @throws ExtensionConvertException when serialization fails
 */
@Override
public <E extends Extension> ExtensionStore convertTo(E extension) {
    var gvk = extension.groupVersionKind();
    var scheme = schemeManager.get(gvk);
    try {
        // Re-map the instance onto the scheme's declared type unless it already is one.
        var convertedExtension = Optional.of(extension)
            .map(item -> scheme.type().isAssignableFrom(item.getClass())
                ? item
                : objectMapper.convertValue(item, scheme.type())
            )
            .orElseThrow();

        var validation = new ValidationData<>(extension);
        var extensionJsonNode = objectMapper.valueToTree(convertedExtension);
        var validator = getValidator(scheme);
        validator.validate(extensionJsonNode, validation);

        if (!validation.isValid()) {
            log.debug("Failed to validate Extension: {}, and errors were: {}",
                extension.getClass(), validation.results());
            throw new SchemaViolationException(extension.groupVersionKind(), validation.results());
        }

        var version = extension.getMetadata().getVersion();
        var storeName = buildStoreName(scheme, extension.getMetadata().getName());
        var data = objectMapper.writeValueAsBytes(extensionJsonNode);
        return new ExtensionStore(storeName, data, version);
    } catch (IOException e) {
        throw new ExtensionConvertException("Failed write Extension as bytes", e);
    } catch (ResolutionException e) {
        throw new RuntimeException("Failed to create schema validator", e);
    }
}
@Test
void shouldThrowSchemaViolationExceptionWhenNameNotSet() {
    // A fake extension without metadata.name must fail schema validation.
    var extension = new FakeExtension();
    extension.setMetadata(new Metadata());
    extension.setApiVersion("fake.halo.run/v1alpha1");
    extension.setKind("Fake");

    var violation = assertThrows(SchemaViolationException.class, () -> converter.convertTo(extension));

    assertEquals(1, violation.getErrors().size());
    var firstResult = violation.getErrors().items().get(0);
    assertEquals(1026, firstResult.code());
    assertEquals("Field 'name' is required.", firstResult.message());
}
// Whether the Jet engine is enabled on this configuration (disabled by default).
public boolean isEnabled() {
    return enabled;
}
@Test
public void testJetIsDisabledByDefault() {
    // Both a standalone JetConfig and the one nested in Config start out disabled.
    JetConfig standaloneConfig = new JetConfig();
    assertFalse(standaloneConfig.isEnabled());
    assertFalse(new Config().getJetConfig().isEnabled());
}
/**
 * Applies a custom field-type mapping to every given index set, optionally rotating
 * the affected index sets immediately so the new mapping takes effect.
 *
 * @throws RuntimeException rethrown after logging when any index-set update fails
 */
public void changeFieldType(final CustomFieldMapping customMapping,
                            final Set<String> indexSetsIds,
                            final boolean rotateImmediately) {
    // Validate up front: the field must be changeable, the target type legal, and
    // every index able to take the change — before any index set is touched.
    checkFieldTypeCanBeChanged(customMapping.fieldName());
    checkType(customMapping);
    checkAllIndicesSupportFieldTypeChange(customMapping.fieldName(), indexSetsIds);
    for (String indexSetId : indexSetsIds) {
        try {
            indexSetService.get(indexSetId).ifPresent(indexSetConfig -> {
                var updatedIndexSetConfig = storeMapping(customMapping, indexSetConfig);
                // Only an actually-updated config (non-empty Optional) is cycled.
                if (rotateImmediately) {
                    updatedIndexSetConfig.ifPresent(this::cycleIndexSet);
                }
            });
        } catch (Exception ex) {
            LOG.error("Failed to update field type in index set : " + indexSetId, ex);
            throw ex;
        }
    }
}
@Test
void testDoesNotCycleIndexSetWhenMappingAlreadyExisted() {
    doReturn(Optional.of(existingIndexSet)).when(indexSetService).get("existing_index_set");

    toTest.changeFieldType(existingCustomFieldMapping,
            new LinkedHashSet<>(List.of("existing_index_set", "wrong_index_set")),
            true);

    // Even with rotateImmediately=true, an unchanged mapping must not trigger a cycle.
    verify(existingMongoIndexSet, never()).cycle();
    verifyNoMoreInteractions(mongoIndexSetService);
}
/**
 * Streams every cached issue and records the keys of those whose index entry needs
 * refreshing in the changed-issues repository.
 */
@Override
public void execute(Context context) {
    try (CloseableIterator<DefaultIssue> cachedIssues = protoIssueCache.traverse()) {
        while (cachedIssues.hasNext()) {
            DefaultIssue cachedIssue = cachedIssues.next();
            if (shouldUpdateIndexForIssue(cachedIssue)) {
                changedIssuesRepository.addIssueKey(cachedIssue.key());
            }
        }
    }
}
@Test
public void execute_whenIssueIsNew_shouldLoadIssue() {
    // Cache one brand-new issue, run the step, and expect its key to be flagged for indexing.
    protoIssueCache.newAppender()
        .append(newDefaultIssue().setNew(true))
        .close();

    underTest.execute(mock(ComputationStep.Context.class));

    verify(changedIssuesRepository).addIssueKey("issueKey1");
}
/**
 * Strips the HELIX_DISABLED_PARTITION entries from both the list and map fields of
 * the given instance config, returning whether anything was actually removed.
 */
public static boolean removeDisabledPartitions(InstanceConfig instanceConfig) {
    final ZNRecord record = instanceConfig.getRecord();
    final String disabledPartitionsKey =
        InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name();
    // Both removals must always run, so they are evaluated before combining the results.
    final boolean removedFromList = record.getListFields().remove(disabledPartitionsKey) != null;
    final boolean removedFromMap = record.getMapFields().remove(disabledPartitionsKey) != null;
    return removedFromList || removedFromMap;
}
@Test
public void testRemoveDisabledPartitions() {
    InstanceConfig instanceConfig = new InstanceConfig("Server_myInstance");
    // Removing from a config with no disabled partitions is a no-op.
    assertTrue(instanceConfig.getDisabledPartitionsMap().isEmpty());
    assertFalse(HelixHelper.removeDisabledPartitions(instanceConfig));

    // Disable one partition, then verify removal clears it and reports a change.
    instanceConfig.setInstanceEnabledForPartition("myResource", "myPartition", false);
    assertFalse(instanceConfig.getDisabledPartitionsMap().isEmpty());
    assertTrue(HelixHelper.removeDisabledPartitions(instanceConfig));
    assertTrue(instanceConfig.getDisabledPartitionsMap().isEmpty());
}
/**
 * Builds and persists an unread notification message from the given template and
 * rendered content, returning the id of the newly inserted record.
 */
@Override
public Long createNotifyMessage(Long userId, Integer userType, NotifyTemplateDO template,
                                String templateContent, Map<String, Object> templateParams) {
    NotifyMessageDO notifyMessage = new NotifyMessageDO()
            .setUserId(userId)
            .setUserType(userType)
            .setTemplateId(template.getId())
            .setTemplateCode(template.getCode())
            .setTemplateType(template.getType())
            .setTemplateNickname(template.getNickname())
            .setTemplateContent(templateContent)
            .setTemplateParams(templateParams)
            .setReadStatus(false);
    notifyMessageMapper.insert(notifyMessage);
    return notifyMessage.getId();
}
@Test
public void testCreateNotifyMessage_success() {
    // Prepare parameters
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class);
    String templateContent = randomString();
    Map<String, Object> templateParams = randomTemplateParams();
    // Mock methods (none needed)

    // Invoke
    Long messageId = notifyMessageService.createNotifyMessage(userId, userType, template,
            templateContent, templateParams);
    // Assert: the persisted record mirrors every input and starts unread.
    NotifyMessageDO message = notifyMessageMapper.selectById(messageId);
    assertNotNull(message);
    assertEquals(userId, message.getUserId());
    assertEquals(userType, message.getUserType());
    assertEquals(template.getId(), message.getTemplateId());
    assertEquals(template.getCode(), message.getTemplateCode());
    assertEquals(template.getType(), message.getTemplateType());
    assertEquals(template.getNickname(), message.getTemplateNickname());
    assertEquals(templateContent, message.getTemplateContent());
    assertEquals(templateParams, message.getTemplateParams());
    assertEquals(false, message.getReadStatus());
    assertNull(message.getReadTime());
}
/**
 * Decodes the buffered invocation exactly once. Decode failures are captured on the
 * request (marked broken, the error stored as its data) instead of propagating.
 */
@Override
public void decode() throws Exception {
    // Skip when already decoded or when there is nothing to decode from.
    if (hasDecoded || channel == null || inputStream == null) {
        return;
    }
    try {
        decode(channel, inputStream);
    } catch (Throwable t) {
        if (log.isWarnEnabled()) {
            log.warn(PROTOCOL_FAILED_DECODE, "", "", "Decode rpc invocation failed: " + t.getMessage(), t);
        }
        request.setBroken(true);
        request.setData(t);
    } finally {
        hasDecoded = true;
    }
}
/**
 * Round-trip test: encodes a client-side RpcInvocation into a buffer, decodes
 * it server-side via DecodeableRpcInvocation, and verifies all fields and
 * attachments survive the trip.
 */
@Test
void test() throws Exception {
    // Simulate the data called by the client (stored in the invocation and written to the buffer).
    URL url = new ServiceConfigURL("dubbo", "127.0.0.1", 9103, DemoService.class.getName(), VERSION_KEY, "1.0.0");
    RpcInvocation inv = new RpcInvocation(
            null,
            "sayHello",
            DemoService.class.getName(),
            "",
            new Class<?>[] {String.class},
            new String[] {"yug"});
    inv.setObjectAttachment(PATH_KEY, url.getPath());
    inv.setObjectAttachment(VERSION_KEY, url.getVersion());
    inv.setObjectAttachment(DUBBO_VERSION_KEY, DUBBO_VERSION);
    inv.setObjectAttachment("k1", "v1");
    inv.setObjectAttachment("k2", "v2");
    inv.setTargetServiceUniqueName(url.getServiceKey());
    // Write the data of inv to the buffer.
    Byte proto = CodecSupport.getIDByName(DefaultSerializationSelector.getDefaultRemotingSerialization());
    ChannelBuffer buffer = writeBuffer(url, inv, proto);
    FrameworkModel frameworkModel = new FrameworkModel();
    ApplicationModel applicationModel = frameworkModel.newApplication();
    applicationModel
            .getDefaultModule()
            .getServiceRepository()
            .registerService(DemoService.class.getName(), DemoService.class);
    frameworkModel
            .getBeanFactory()
            .getBean(PermittedSerializationKeeper.class)
            .registerService(url);
    // Simulate the server-side decode.
    Channel channel = new MockChannel();
    Request request = new Request(1);
    ChannelBufferInputStream is = new ChannelBufferInputStream(buffer, buffer.readableBytes());
    DecodeableRpcInvocation decodeableRpcInvocation =
            new DecodeableRpcInvocation(frameworkModel, channel, request, is, proto);
    decodeableRpcInvocation.decode();
    // Verify that the decoded invocation is consistent with the client's invocation.
    Assertions.assertEquals(request.getVersion(), DUBBO_VERSION);
    Assertions.assertEquals(decodeableRpcInvocation.getObjectAttachment(DUBBO_VERSION_KEY), DUBBO_VERSION);
    Assertions.assertEquals(
            decodeableRpcInvocation.getObjectAttachment(VERSION_KEY),
            inv.getObjectAttachment(VERSION_KEY));
    Assertions.assertEquals(
            decodeableRpcInvocation.getObjectAttachment(PATH_KEY), inv.getObjectAttachment(PATH_KEY));
    Assertions.assertEquals(decodeableRpcInvocation.getMethodName(), inv.getMethodName());
    Assertions.assertEquals(decodeableRpcInvocation.getParameterTypesDesc(), inv.getParameterTypesDesc());
    Assertions.assertArrayEquals(decodeableRpcInvocation.getParameterTypes(), inv.getParameterTypes());
    Assertions.assertArrayEquals(decodeableRpcInvocation.getArguments(), inv.getArguments());
    Assertions.assertTrue(
            CollectionUtils.mapEquals(decodeableRpcInvocation.getObjectAttachments(), inv.getObjectAttachments()));
    Assertions.assertEquals(decodeableRpcInvocation.getTargetServiceUniqueName(), inv.getTargetServiceUniqueName());
    frameworkModel.destroy();
}
/**
 * Creates a thread pool that scales between {@code min} and {@code max}
 * threads, retiring idle threads after {@code keepAliveTime} milliseconds.
 *
 * @param min           core (minimum) number of threads
 * @param max           maximum number of threads
 * @param keepAliveTime idle-thread keep-alive, in milliseconds
 * @return the configured executor
 */
public static ExecutorService newScalingThreadPool(int min, int max, long keepAliveTime) {
    final ScalingQueue<Runnable> workQueue = new ScalingQueue<>();
    final ThreadPoolExecutor executor =
            new ScalingThreadPoolExecutor(min, max, keepAliveTime, TimeUnit.MILLISECONDS, workQueue);
    // ForceQueuePolicy: rejected tasks are presumably forced back onto the queue
    // rather than failing — confirm against its implementation.
    executor.setRejectedExecutionHandler(new ForceQueuePolicy());
    return executor;
}
/**
 * Verifies that the scaling pool starts with no threads and grows a thread
 * per submitted task instead of queueing while below the max pool size.
 */
@Test
public void testCreateThreadPerRunnable() {
    ThreadPoolExecutor executorService =
            (ThreadPoolExecutor) ScalingThreadPoolExecutor.newScalingThreadPool(0, 5, 500);
    // Fixed: JUnit's assertEquals takes the expected value first; the original
    // call had the arguments reversed, producing a misleading failure message.
    assertEquals(0, executorService.getLargestPoolSize());
    for (int i = 0; i < 5; i++) {
        executorService.submit(getSleepingRunnable());
    }
    // The pool must have grown beyond a single thread to run the sleeping tasks.
    assertTrue(executorService.getLargestPoolSize() >= 2);
}
/**
 * Resolves SQL metadata for a sample key/value object, trying each supported
 * serialization format in turn: serialized {@code Data} (portable, compact,
 * JSON, or plain Java), Portable/VersionedPortable instances, generic
 * records, compact-serializable objects, JSON values, and finally plain Java
 * reflection.
 *
 * Returns {@code null} when no metadata can be derived (JSON values carry no
 * schema) or when resolution fails for any reason — callers fall back to
 * defaults.
 */
@Nullable
@SuppressWarnings("checkstyle:returncount")
static Metadata resolve(InternalSerializationService ss, Object target, boolean key) {
    try {
        if (target instanceof Data) {
            Data data = (Data) target;
            if (data.isPortable()) {
                ClassDefinition classDefinition = ss.getPortableContext().lookupClassDefinition(data);
                return resolvePortable(classDefinition, key);
            } else if (data.isCompact()) {
                return resolveCompact(ss.extractSchemaFromData(data), key);
            } else if (data.isJson()) {
                // JSON carries no schema to resolve.
                return null;
            } else {
                // Fall back to reflection over the deserialized Java object.
                return resolveJava(ss.toObject(data).getClass(), key);
            }
        } else if (target instanceof VersionedPortable) {
            VersionedPortable portable = (VersionedPortable) target;
            ClassDefinition classDefinition = ss.getPortableContext()
                    .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), portable.getClassVersion());
            return resolvePortable(classDefinition, key);
        } else if (target instanceof Portable) {
            Portable portable = (Portable) target;
            // Plain Portable exposes no explicit version; 0 is used for the lookup.
            ClassDefinition classDefinition = ss.getPortableContext()
                    .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), 0);
            return resolvePortable(classDefinition, key);
        } else if (target instanceof PortableGenericRecord) {
            return resolvePortable(((PortableGenericRecord) target).getClassDefinition(), key);
        } else if (target instanceof CompactGenericRecord) {
            return resolveCompact(((CompactGenericRecord) target).getSchema(), key);
        } else if (ss.isCompactSerializable(target)) {
            Schema schema = ss.extractSchemaFromObject(target);
            return resolveCompact(schema, key);
        } else if (target instanceof HazelcastJsonValue) {
            return null;
        } else {
            return resolveJava(target.getClass(), key);
        }
    } catch (Exception e) {
        // Best effort: any resolution failure yields "no metadata".
        return null;
    }
}
/** JSON values must resolve to no metadata, both as an object and as serialized Data. */
@Test
public void test_json() {
    InternalSerializationService ss = new DefaultSerializationServiceBuilder().build();
    Metadata metadata = SampleMetadataResolver.resolve(ss, new HazelcastJsonValue("{}"), key);
    assertThat(metadata).isNull();
    metadata = SampleMetadataResolver.resolve(ss, ss.toData(new HazelcastJsonValue("{}")), key);
    assertThat(metadata).isNull();
}
/**
 * Computes a feed-block state for the cluster, or returns {@code null} when
 * feed should not be blocked (feature disabled, or no resource exhaustions
 * detected on any node).
 *
 * The human-readable description lists at most 3 exhaustions, with a
 * "(... and N more)" suffix when more exist.
 */
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) {
    if (!feedBlockEnabled) {
        return null;
    }
    var nodeInfos = cluster.getNodeInfos();
    var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos);
    if (exhaustions.isEmpty()) {
        return null;
    }
    int maxDescriptions = 3;
    String description = exhaustions.stream()
            .limit(maxDescriptions)
            .map(NodeResourceExhaustion::toExhaustionAddedDescription)
            .collect(Collectors.joining(", "));
    if (exhaustions.size() > maxDescriptions) {
        description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions);
    }
    description = decoratedMessage(cluster, description);
    // FIXME we currently will trigger a cluster state recomputation even if the number of
    // exhaustions is greater than what is returned as part of the description. Though at
    // that point, cluster state recomputations will be the least of your worries...!
    return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions);
}
/** Hysteresis: a node that just crossed below the limit stays blocked while within the window. */
@Test
void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() {
    var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51)));
    var calc = new ResourceExhaustionCalculator(true,
            mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1);
    // Node 1 goes from 0.51 to 0.49, crossing the 0.5 threshold. Should still be blocked.
    // Node 2 is at 0.49 but was not previously blocked and should not be blocked now either.
    var cf = createFixtureWithReportedUsages(
            forNode(1, usage("disk", 0.3), usage("memory", 0.49)),
            forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster());
    assertNotNull(feedBlock);
    // TODO should we not change the limits themselves? Explicit mention of hysteresis state?
    assertEquals(decorate(cf, "memory on node 1 [storage.1.local] is 49.0% full (the configured limit is 40.0%)"),
            feedBlock.getDescription());
}
/**
 * REST endpoint returning general node information, as JSON or XML (UTF-8).
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
        MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeInfo get() {
    return getNodeInfo();
}
/** Fetches /ws/v1/node/info as XML and validates the single nodeInfo element. */
@Test
public void testSingleNodesXML() throws JSONException, Exception {
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("node")
            .path("info/").accept(MediaType.APPLICATION_XML)
            .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
            response.getType().toString());
    String xml = response.getEntity(String.class);
    // Parse with a hardened (XXE-safe) DocumentBuilderFactory.
    DocumentBuilderFactory dbf = XMLUtils.newSecureDocumentBuilderFactory();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList nodes = dom.getElementsByTagName("nodeInfo");
    assertEquals("incorrect number of elements", 1, nodes.getLength());
    verifyNodesXML(nodes);
}
/**
 * Maps a TDengine column type name to the corresponding SeaTunnel data type.
 *
 * @param tdengineType the TDengine type name (see the TDENGINE_* constants)
 * @return the SeaTunnel type
 * @throws TDengineConnectorException for unsupported types (e.g. GEOMETRY, UNKNOWN)
 */
public static SeaTunnelDataType<?> mapping(String tdengineType) {
    switch (tdengineType) {
        case TDENGINE_BOOL:
        case TDENGINE_BIT:
            return BasicType.BOOLEAN_TYPE;
        case TDENGINE_TINYINT:
        case TDENGINE_TINYINT_UNSIGNED:
        case TDENGINE_SMALLINT:
        case TDENGINE_SMALLINT_UNSIGNED:
        case TDENGINE_MEDIUMINT:
        case TDENGINE_MEDIUMINT_UNSIGNED:
        case TDENGINE_INT:
        case TDENGINE_INTEGER:
        case TDENGINE_YEAR:
            return BasicType.INT_TYPE;
        case TDENGINE_INT_UNSIGNED:
        case TDENGINE_INTEGER_UNSIGNED:
        case TDENGINE_BIGINT:
            return BasicType.LONG_TYPE;
        case TDENGINE_BIGINT_UNSIGNED:
            // Unsigned 64-bit values do not fit in a signed long; use DECIMAL(20,0).
            return new DecimalType(20, 0);
        case TDENGINE_DECIMAL:
            log.warn("{} will probably cause value overflow.", TDENGINE_DECIMAL);
            return new DecimalType(38, 18);
        case TDENGINE_DECIMAL_UNSIGNED:
            return new DecimalType(38, 18);
        case TDENGINE_FLOAT:
            return BasicType.FLOAT_TYPE;
        case TDENGINE_FLOAT_UNSIGNED:
            log.warn("{} will probably cause value overflow.", TDENGINE_FLOAT_UNSIGNED);
            return BasicType.FLOAT_TYPE;
        case TDENGINE_DOUBLE:
            return BasicType.DOUBLE_TYPE;
        case TDENGINE_DOUBLE_UNSIGNED:
            log.warn("{} will probably cause value overflow.", TDENGINE_DOUBLE_UNSIGNED);
            return BasicType.DOUBLE_TYPE;
        case TDENGINE_CHAR:
        case TDENGINE_TINYTEXT:
        case TDENGINE_MEDIUMTEXT:
        case TDENGINE_TEXT:
        case TDENGINE_VARCHAR:
        case TDENGINE_JSON:
        case TDENGINE_LONGTEXT:
            return BasicType.STRING_TYPE;
        case TDENGINE_DATE:
            return LocalTimeType.LOCAL_DATE_TYPE;
        case TDENGINE_TIME:
            return LocalTimeType.LOCAL_TIME_TYPE;
        case TDENGINE_DATETIME:
        case TDENGINE_TIMESTAMP:
            return LocalTimeType.LOCAL_DATE_TIME_TYPE;
        case TDENGINE_TINYBLOB:
        case TDENGINE_MEDIUMBLOB:
        case TDENGINE_BLOB:
        case TDENGINE_LONGBLOB:
        case TDENGINE_VARBINARY:
        case TDENGINE_BINARY:
            return PrimitiveByteArrayType.INSTANCE;
        // Doesn't support yet
        case TDENGINE_GEOMETRY:
        case TDENGINE_UNKNOWN:
        default:
            throw new TDengineConnectorException(
                    CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE,
                    String.format("Doesn't support TDENGINE type '%s' yet.", tdengineType));
    }
}
/** Spot-checks the TDengine-to-SeaTunnel type mapping for BOOL and CHAR. */
@Test
void mapping() {
    SeaTunnelDataType<?> type = TDengineTypeMapper.mapping("BOOL");
    Assertions.assertEquals(BasicType.BOOLEAN_TYPE, type);
    type = TDengineTypeMapper.mapping("CHAR");
    Assertions.assertEquals(BasicType.STRING_TYPE, type);
}
/**
 * Creates a predicate that matches paths against the given patterns with no
 * base-path restriction.
 *
 * @param patterns the path-matcher patterns
 * @return the matching predicate
 */
public static PathMatcherPredicate matches(final List<String> patterns) {
    // A null base path means patterns are evaluated as given.
    final PathMatcherPredicate predicate = new PathMatcherPredicate(null, patterns);
    return predicate;
}
/** A simple file-name pattern combined with a base path matches only direct children. */
@Test
void shouldMatchGivenSimpleExpressionAndBasePath() {
    // Given
    List<Path> paths = Stream.of("/base/test.txt", "/base/sub/dir/test.txt").map(Path::of).toList();
    PathMatcherPredicate predicate = PathMatcherPredicate.matches(Path.of("/base"), List.of("test.txt"));
    // When
    List<Path> filtered = paths.stream().filter(predicate).toList();
    // Then
    assertEquals(List.of(Path.of("/base/test.txt")), filtered);
}
/**
 * Dispatches the DDL statement to its registered command factory.
 *
 * @throws KsqlException if no factory is registered for the statement type
 */
@Override
public DdlCommand create(
    final String sqlExpression,
    final DdlStatement ddlStatement,
    final SessionConfig config
) {
    // The default factory fails fast, listing the statement types we do support.
    return FACTORIES
        .getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> {
            throw new KsqlException(
                "Unable to find ddl command factory for statement:"
                    + statement.getClass()
                    + " valid statements:"
                    + FACTORIES.keySet()
            );
        })
        .handle(
            this,
            new CallInfo(sqlExpression, config),
            ddlStatement);
}
/** CREATE SOURCE TABLE must route to the create-table command factory. */
@Test
public void shouldCreateCommandForCreateSourceTable() {
    // Given:
    final CreateTable statement = new CreateTable(SOME_NAME,
        TableElements.of(
            tableElement("COL1", new Type(SqlTypes.BIGINT)),
            tableElement("COL2", new Type(SqlTypes.STRING))),
        false, true, withProperties, true);
    // When:
    final DdlCommand result = commandFactories
        .create(sqlExpression, statement, SessionConfig.of(ksqlConfig, emptyMap()));
    // Then:
    assertThat(result, is(createTableCommand));
    verify(createSourceFactory).createTableCommand(statement, ksqlConfig);
}
/**
 * Updates a neutron port (PUT /{id}).
 *
 * When this node is not the active HA instance, the request is relayed to
 * the active node. For NETDEV (DPDK) datapath compute nodes, the response
 * carries the vhost-user VIF type (and socket dir, if set) as payload so the
 * caller can bind the port accordingly; otherwise an empty 200 is returned.
 */
@PUT
@Path("{id}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response updatePort(@PathParam("id") String id, InputStream input) throws IOException {
    log.trace(String.format(MESSAGE, "UPDATE " + id));
    String inputStr = IOUtils.toString(input, REST_UTF8);
    if (!haService.isActive() && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
        // Not the active instance: proxy the update to the active node.
        return syncPut(haService, PORTS, id, inputStr);
    }
    final NeutronPort port = (NeutronPort) jsonToModelEntity(inputStr, NeutronPort.class);
    adminService.updatePort(port);
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode jsonNode = mapper.createObjectNode();
    OpenstackNode node = nodeService.node(port.getHostId());
    if (node == null) {
        return status(Response.Status.OK).build();
    } else if (node.datapathType().equals(DpdkConfig.DatapathType.NETDEV)) {
        log.debug("UpdatePort for port {} called in netdev device {} "
                + "so sends vif type as a payload of the response", port.getId(), node.hostname());
        jsonNode.put(VIF_TYPE, VHOSTUSER);
        if (node.socketDir() != null) {
            jsonNode.put(SOCKET_DIR, node.socketDir());
        }
        return status(Response.Status.OK).entity(jsonNode.toString()).build();
    } else {
        return status(Response.Status.OK).build();
    }
}
/** Updating a port whose admin-service update raises IllegalArgumentException must yield HTTP 400. */
@Test
public void testUpdatePortWithNonexistId() {
    expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
    replay(mockOpenstackHaService);
    // The admin service rejects the port update.
    mockOpenstackNetworkAdminService.updatePort(anyObject());
    expectLastCall().andThrow(new IllegalArgumentException());
    replay(mockOpenstackNetworkAdminService);
    final WebTarget wt = target();
    InputStream jsonStream = OpenstackNetworkWebResourceTest.class
            .getResourceAsStream("openstack-port.json");
    Response response = wt.path(PATH + "/65c0ee9f-d634-4522-8954-51021b570b0d")
            .request(MediaType.APPLICATION_JSON_TYPE)
            .put(Entity.json(jsonStream));
    final int status = response.getStatus();
    assertThat(status, is(400));
    verify(mockOpenstackNetworkAdminService);
}
/**
 * Returns {@code true} when any registered remoting parser recognizes the
 * given bean as a service. Evaluation short-circuits on the first match.
 *
 * @param bean     the candidate bean
 * @param beanName the bean's name
 */
public boolean isService(Object bean, String beanName) {
    return allRemotingParsers.stream()
            .anyMatch(parser -> parser.isService(bean, beanName));
}
/** A plain remote bean should be recognized as a service by at least one parser. */
@Test
public void testIsServiceFromObject() {
    SimpleRemoteBean remoteBean = new SimpleRemoteBean();
    assertTrue(remotingParser.isService(remoteBean, remoteBean.getClass().getName()));
}
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(SQLSERVER_BIT); builder.dataType(SQLSERVER_BIT); break; case TINYINT: builder.columnType(SQLSERVER_TINYINT); builder.dataType(SQLSERVER_TINYINT); break; case SMALLINT: builder.columnType(SQLSERVER_SMALLINT); builder.dataType(SQLSERVER_SMALLINT); break; case INT: builder.columnType(SQLSERVER_INT); builder.dataType(SQLSERVER_INT); break; case BIGINT: builder.columnType(SQLSERVER_BIGINT); builder.dataType(SQLSERVER_BIGINT); break; case FLOAT: builder.columnType(SQLSERVER_REAL); builder.dataType(SQLSERVER_REAL); break; case DOUBLE: builder.columnType(SQLSERVER_FLOAT); builder.dataType(SQLSERVER_FLOAT); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to 
decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", SQLSERVER_DECIMAL, precision, scale)); builder.dataType(SQLSERVER_DECIMAL); builder.precision(precision); builder.scale(scale); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(MAX_NVARCHAR); builder.dataType(MAX_NVARCHAR); } else if (column.getColumnLength() <= MAX_NVARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", SQLSERVER_NVARCHAR, column.getColumnLength())); builder.dataType(SQLSERVER_NVARCHAR); builder.length(column.getColumnLength()); } else { builder.columnType(MAX_NVARCHAR); builder.dataType(MAX_NVARCHAR); builder.length(column.getColumnLength()); } break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(MAX_VARBINARY); builder.dataType(SQLSERVER_VARBINARY); } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) { builder.columnType( String.format("%s(%s)", SQLSERVER_VARBINARY, column.getColumnLength())); builder.dataType(SQLSERVER_VARBINARY); builder.length(column.getColumnLength()); } else { builder.columnType(MAX_VARBINARY); builder.dataType(SQLSERVER_VARBINARY); builder.length(column.getColumnLength()); } break; case DATE: builder.columnType(SQLSERVER_DATE); builder.dataType(SQLSERVER_DATE); break; case TIME: if (column.getScale() != null && column.getScale() > 0) { int timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " 
+ "it will be converted to time({})", column.getName(), column.getScale(), MAX_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", SQLSERVER_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(SQLSERVER_TIME); } builder.dataType(SQLSERVER_TIME); break; case TIMESTAMP: if (column.getScale() != null && column.getScale() > 0) { int timestampScale = column.getScale(); if (timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType( String.format("%s(%s)", SQLSERVER_DATETIME2, timestampScale)); builder.scale(timestampScale); } else { builder.columnType(SQLSERVER_DATETIME2); } builder.dataType(SQLSERVER_DATETIME2); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.SQLSERVER, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
/** SeaTunnel INT must reconvert to SQL Server INT for both column and data type. */
@Test
public void testReconvertInt() {
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.INT_TYPE).build();
    BasicTypeDefine typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_INT, typeDefine.getColumnType());
    Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_INT, typeDefine.getDataType());
}
/**
 * Replaces the bar in the series that covers the same time period as
 * {@code newBar} — but only when the existing bar differs from it.
 *
 * @param barSeries the series to search
 * @param newBar    the replacement candidate
 * @return the bar that was replaced, or {@code null} when the series is
 *         empty/null or no differing same-period bar was found
 */
public static Bar replaceBarIfChanged(BarSeries barSeries, Bar newBar) {
    List<Bar> bars = barSeries.getBarData();
    if (bars == null || bars.isEmpty()) {
        return null;
    }
    for (int index = 0; index < bars.size(); index++) {
        final Bar candidate = bars.get(index);
        final boolean samePeriod = candidate.getBeginTime().isEqual(newBar.getBeginTime())
                && candidate.getEndTime().isEqual(newBar.getEndTime())
                && candidate.getTimePeriod().equals(newBar.getTimePeriod());
        if (samePeriod && !candidate.equals(newBar)) {
            // List.set returns the element previously at this position.
            return bars.set(index, newBar);
        }
    }
    return null;
}
/**
 * replaceBarIfChanged must swap in a changed bar at the same time period and
 * return the bar it replaced; unchanged bars stay untouched and the series
 * size is unaffected.
 */
@Test
public void replaceBarIfChangedTest() {
    final List<Bar> bars = new ArrayList<>();
    time = ZonedDateTime.of(2019, 6, 1, 1, 1, 0, 0, ZoneId.systemDefault());
    final Bar bar0 = new MockBar(time, 1d, 2d, 3d, 4d, 5d, 0d, 7, numFunction);
    final Bar bar1 = new MockBar(time.plusDays(1), 1d, 1d, 1d, 1d, 1d, 1d, 1, numFunction);
    final Bar bar2 = new MockBar(time.plusDays(2), 2d, 2d, 2d, 2d, 2d, 2d, 2, numFunction);
    final Bar bar3 = new MockBar(time.plusDays(3), 3d, 3d, 3d, 3d, 3d, 3d, 3, numFunction);
    final Bar bar4 = new MockBar(time.plusDays(4), 3d, 4d, 4d, 5d, 6d, 4d, 4, numFunction);
    final Bar bar5 = new MockBar(time.plusDays(5), 5d, 5d, 5d, 5d, 5d, 5d, 5, numFunction);
    final Bar bar6 = new MockBar(time.plusDays(6), 6d, 6d, 6d, 6d, 6d, 6d, 6, numFunction);
    bars.add(bar0);
    bars.add(bar1);
    bars.add(bar2);
    bars.add(bar3);
    bars.add(bar4);
    bars.add(bar5);
    bars.add(bar6);
    series = new BaseBarSeriesBuilder().withNumTypeOf(numFunction).withName("Series Name").withBars(bars).build();
    final Bar newBar3 = new MockBar(bar3.getEndTime(), 1d, 1d, 1d, 1d, 1d, 1d, 33, numFunction);
    final Bar newBar5 = new MockBar(bar5.getEndTime(), 1d, 1d, 1d, 1d, 1d, 1d, 55, numFunction);
    // newBar3 must be replaced with bar3
    Bar replacedBar3 = BarSeriesUtils.replaceBarIfChanged(series, newBar3);
    // newBar5 must be replaced with bar5
    Bar replacedBar5 = BarSeriesUtils.replaceBarIfChanged(series, newBar5);
    // the replaced bar must be the same as the previous bar
    assertEquals(bar3, replacedBar3);
    assertEquals(bar5, replacedBar5);
    assertNotEquals(bar2, replacedBar3);
    assertNotEquals(bar6, replacedBar5);
    // the replaced bar must be removed from the series
    assertNotEquals(series.getBar(3), replacedBar3);
    assertNotEquals(series.getBar(5), replacedBar5);
    // the new bar must be stored in the series
    assertEquals(series.getBar(3), newBar3);
    assertEquals(series.getBar(5), newBar5);
    // no bar was added
    assertEquals(7, series.getBarData().size());
    assertEquals(7, series.getBarCount());
}
/**
 * Convenience overload: creates the command using the service context taken
 * from the given execution context.
 */
public Command create(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext context) {
  return create(statement, context.getServiceContext(), context);
}
/** ALTER SYSTEM on a property outside the editable properties list must be rejected. */
@Test
public void shouldRaiseExceptionIfKeyDoesNotExistEditablePropertiesList() {
    configuredStatement = configuredStatement(
            "ALTER SYSTEM 'ksql.streams.upgrade.from'='TEST';", alterSystemProperty);
    when(alterSystemProperty.getPropertyName()).thenReturn("ksql.streams.upgrade.from");
    when(alterSystemProperty.getPropertyValue()).thenReturn("TEST");
    when(config.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED)).thenReturn(true);
    assertThrows(ConfigException.class,
            () -> commandFactory.create(configuredStatement, executionContext));
}
/**
 * Builds an OkHttp request builder for the given method.
 *
 * GET and HEAD encode the form values into the URL's query string; PUT,
 * DELETE and POST (the default) send them as a form body instead.
 *
 * @param url    the target URL
 * @param form   the form parameters
 * @param method the HTTP method
 * @return a pre-configured request builder
 */
public static Request.Builder buildRequestBuilder(final String url,
                                                  final Map<String, ?> form,
                                                  final HTTPMethod method) {
    final Request.Builder builder = new Request.Builder();
    switch (method) {
        case GET:
            return builder.url(buildHttpUrl(url, form)).get();
        case HEAD:
            return builder.url(buildHttpUrl(url, form)).head();
        case PUT:
            return builder.url(buildHttpUrl(url)).put(buildFormBody(form));
        case DELETE:
            return builder.url(buildHttpUrl(url)).delete(buildFormBody(form));
        default:
            // POST and any future methods fall back to a form-body POST.
            return builder.url(buildHttpUrl(url)).post(buildFormBody(form));
    }
}
/** A POST builder must carry a body, the POST method, and the original URL. */
@Test
public void buildRequestBuilderForPOSTTest() {
    Request.Builder builder = HttpUtils.buildRequestBuilder(TEST_URL, formMap, HttpUtils.HTTPMethod.POST);
    Assert.assertNotNull(builder);
    Assert.assertNotNull(builder.build().body());
    Assert.assertEquals(builder.build().method(), HttpUtils.HTTPMethod.POST.value());
    Assert.assertEquals(builder.build().url().toString(), TEST_URL);
}
/**
 * Computes the coefficient of variation of the wrapped criterion for a
 * single position: standard deviation divided by the average.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    final Num mean = averageCriterion.calculate(series, position);
    final Num deviation = standardDeviationCriterion.calculate(series, position);
    return deviation.dividedBy(mean);
}
/** Checks the relative-standard-deviation criterion over a two-position trading record. */
@Test
public void calculateStandardDeviationPnL() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    TradingRecord tradingRecord = new BaseTradingRecord(
            Trade.buyAt(0, series, series.one()), Trade.sellAt(2, series, series.one()),
            Trade.buyAt(3, series, series.one()), Trade.sellAt(5, series, series.one()));
    AnalysisCriterion criterion = getCriterion(new ProfitLossCriterion());
    assertNumEquals(0.3333333333333333, criterion.calculate(series, tradingRecord));
}
public static void error(final Logger logger, final String format, final Supplier<Object> supplier) { if (logger.isErrorEnabled()) { logger.error(format, supplier.get()); } }
/** When ERROR is disabled the lazy supplier must never be evaluated. */
@Test
public void testNeverError() {
    when(logger.isErrorEnabled()).thenReturn(false);
    LogUtils.error(logger, supplier);
    verify(supplier, never()).get();
}
/**
 * Starts an incremental-producer builder backed by the given producer.
 *
 * @param hollowProducer the underlying producer to delegate to
 * @return a builder pre-configured with the producer
 */
public static HollowIncrementalProducer.Builder withProducer(HollowProducer hollowProducer) {
    return new Builder().withProducer(hollowProducer);
}
/** Building an incremental producer without a backing producer must fail fast. */
@Test(expected = IllegalArgumentException.class)
public void failsWithNoProducer() {
    // No Hollow Producer supplied.
    HollowIncrementalProducer.withProducer(null)
            .build();
}
/**
 * Creates a mutable copy of this tree, expiring the current root loggable so
 * its storage can be reclaimed once the copy is persisted.
 */
@Override
@NotNull
public BTreeMutable getMutableCopy() {
    final BTreeMutable mutableCopy = new BTreeMutable(this);
    mutableCopy.addExpiredLoggable(rootLoggable);
    return mutableCopy;
}
/** putRight on a duplicates-enabled tree must keep insertion order within equal keys. */
@Test
public void testPutRightSortDuplicates() {
    tm = new BTreeEmpty(log, createTestSplittingPolicy(), true, 1).getMutableCopy();
    List<INode> expected = new ArrayList<>();
    expected.add(kv("1", "1"));
    expected.add(kv("2", "2"));
    expected.add(kv("3", "3"));
    expected.add(kv("5", "51"));
    expected.add(kv("5", "52"));
    expected.add(kv("5", "53"));
    expected.add(kv("5", "54"));
    expected.add(kv("5", "55"));
    expected.add(kv("5", "56"));
    expected.add(kv("5", "57"));
    expected.add(kv("7", "7"));
    for (INode ln : expected) {
        getTreeMutable().putRight(ln);
    }
    assertMatchesIterator(tm, expected);
}
@Override public boolean imbalanceDetected(LoadImbalance imbalance) { Set<? extends MigratablePipeline> candidates = imbalance.getPipelinesOwnedBy(imbalance.srcOwner); //only attempts to migrate if at least 1 pipeline exists return !candidates.isEmpty(); }
/** An imbalance whose source owner has at least one pipeline must be detected. */
@Test
public void imbalanceDetected_shouldReturnTrueWhenPipelineExist() {
    MigratablePipeline pipeline = mock(MigratablePipeline.class);
    ownerPipelines.put(imbalance.srcOwner, Set.of(pipeline));
    boolean imbalanceDetected = strategy.imbalanceDetected(imbalance);
    assertTrue(imbalanceDetected);
}
/**
 * Extracts the host component from a Kerberos principal such as
 * {@code service/host@realm}.
 *
 * @param principalName the full principal name
 * @return the host part, or {@code null} when the principal has none
 */
public static String getHostFromPrincipal(String principalName) {
    final HadoopKerberosName kerberosName = new HadoopKerberosName(principalName);
    return kerberosName.getHostName();
}
/** Principals with a host component yield it; host-less principals yield null. */
@Test
public void testGetHostFromPrincipal() {
    assertEquals("host", SecurityUtil.getHostFromPrincipal("service/host@realm"));
    // NOTE(review): assertNull would be the clearer idiom here, if it is imported.
    assertEquals(null, SecurityUtil.getHostFromPrincipal("service@realm"));
}
/**
 * Validates that the given GCS file pattern is well formed and points to an
 * accessible location, failing with a descriptive error otherwise.
 */
@Override
public void validateInputFilePatternSupported(String filepattern) {
    // Syntactic validation of the gs:// path.
    getGcsPath(filepattern);
    verifyPath(filepattern);
    verifyPathIsAccessible(filepattern, "Could not find file %s");
}
/** A gs:// URI without an object/bucket component must be rejected with a helpful hint. */
@Test
public void testFilePatternMissingBucket() {
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage(
            "Missing object or bucket in path: 'gs://input/', "
                    + "did you mean: 'gs://some-bucket/input'?");
    validator.validateInputFilePatternSupported("gs://input");
}
/**
 * Converts a measure DTO into a domain Measure, interpreting value/data
 * according to the metric's value type. Returns empty when the DTO is null.
 *
 * @throws NullPointerException     if metric is null
 * @throws IllegalArgumentException for unsupported value types
 */
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) {
    requireNonNull(metric);
    if (measureDto == null) {
        return Optional.empty();
    }
    Double value = measureDto.getValue();
    String data = measureDto.getData();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(measureDto, value, data);
        case LONG:
            return toLongMeasure(measureDto, value, data);
        case DOUBLE:
            return toDoubleMeasure(measureDto, value, data);
        case BOOLEAN:
            return toBooleanMeasure(measureDto, value, data);
        case STRING:
            return toStringMeasure(measureDto, data);
        case LEVEL:
            return toLevelMeasure(measureDto, data);
        case NO_VALUE:
            return toNoValueMeasure(measureDto);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
/** A DTO without an alert status yields a measure with no quality-gate status. */
// NOTE(review): the test name mentions a Level metric but SOME_STRING_METRIC is
// used — confirm the fixture matches the intent.
@Test
public void toMeasure_returns_no_QualityGateStatus_if_dto_has_no_alertStatus_for_Level_Metric() {
    Optional<Measure> measure = underTest.toMeasure(EMPTY_MEASURE_DTO, SOME_STRING_METRIC);
    assertThat(measure).isPresent();
    assertThat(measure.get().hasQualityGateStatus()).isFalse();
}
/**
 * Returns the metric with the given name, defaulting the unit to
 * {@code COUNT}.
 */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
}
/** User-defined metrics must be visible in job metrics while the job is still running. */
@Test
public void availableDuringJobExecution() {
    int generatedItems = 1000;
    pipeline.readFrom(TestSources.itemStream(1_000))
            .withIngestionTimestamps()
            .filter(l -> l.sequence() < generatedItems)
            .map(t -> {
                // Count every item passing through the stage.
                Metrics.metric("total").increment();
                return t;
            })
            .writeTo(Sinks.noop());
    Job job = instance.getJet().newJob(pipeline, JOB_CONFIG_WITH_METRICS);
    JobMetricsChecker checker = new JobMetricsChecker(job);
    assertTrueEventually(() -> checker.assertSummedMetricValue("total", generatedItems));
}
/**
 * Returns a pre-processor for optimizable control-channel URIs, or
 * {@code null} when the URI should not be optimized.
 */
@Override
public Processor createPreProcessor(Exchange exchange, DynamicAwareEntry entry) {
    if (DynamicRouterControlConstants.SHOULD_OPTIMIZE.test(entry.getUri())) {
        return queryParamsHeadersProcessor.apply(entry);
    }
    return null;
}
/** An optimizable control-channel URI must produce a usable, non-null pre-processor. */
@Test
void createPreProcessor() throws Exception {
    Mockito.when(exchange.getMessage()).thenReturn(message);
    Mockito.doNothing().when(message).setHeader(CONTROL_ACTION_HEADER, "subscribe");
    Mockito.doNothing().when(message).setHeader(CONTROL_SUBSCRIPTION_ID, "testSub1");
    String originalUri = "dynamic-router-control:subscribe?subscriptionId=testSub1";
    String uri = "dynamic-router-control://subscribe?subscriptionId=testSub1";
    try (DynamicRouterControlChannelSendDynamicAware testSubject =
            new DynamicRouterControlChannelSendDynamicAware()) {
        SendDynamicAware.DynamicAwareEntry entry = testSubject.prepare(exchange, uri, originalUri);
        Processor preProcessor = testSubject.createPreProcessor(exchange, entry);
        Assertions.assertNotNull(preProcessor);
        preProcessor.process(exchange);
    }
}
/**
 * Looks up offsets for the given partitions according to their OffsetSpec
 * (earliest/latest/timestamp/max-timestamp), driving the lookup through the
 * admin API driver machinery.
 */
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                     ListOffsetsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
            ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
    // Translate each OffsetSpec into the wire-level timestamp/sentinel it stands for.
    Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
    ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new ListOffsetsResult(future.all());
}
/** A broker without MAX_TIMESTAMP ListOffsets support must fail the whole request. */
@Test
public void testListOffsetsMaxTimestampUnsupportedSingleOffsetSpec() {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    final Cluster cluster = new Cluster(
            "mockClusterId",
            nodes,
            Collections.singleton(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node})),
            Collections.emptySet(),
            Collections.emptySet(),
            node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        // Broker only supports ListOffsets up to v6 (no MAX_TIMESTAMP).
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(
                ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        env.kafkaClient().prepareUnsupportedVersionResponse(
                request -> request instanceof ListOffsetsRequest);
        ListOffsetsResult result = env.adminClient().listOffsets(
                Collections.singletonMap(tp0, OffsetSpec.maxTimestamp()));
        TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class);
    }
}
// Convenience overload: resolves the expression's SQL type with no lambda type mappings.
public SqlType getExpressionSqlType(final Expression expression) { return getExpressionSqlType(expression, Collections.emptyMap()); }
// Verifies that a CreateStructExpression resolves to a struct type whose fields mirror
// the literal, column-reference, and (empty) nested-struct field types.
@Test public void shouldEvaluateTypeForStructExpression() { // Given: final LogicalSchema schema = LogicalSchema.builder() .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING) .valueColumn(COL0, SqlTypes.array(SqlTypes.INTEGER)) .build(); expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry); final Expression exp = new CreateStructExpression(ImmutableList.of( new Field("field1", new StringLiteral("foo")), new Field("field2", new UnqualifiedColumnReferenceExp(COL0)), new Field("field3", new CreateStructExpression(ImmutableList.of())) )); // When: final SqlType sqlType = expressionTypeManager.getExpressionSqlType(exp); // Then: assertThat(sqlType, is(SqlTypes.struct() .field("field1", SqlTypes.STRING) .field("field2", SqlTypes.array(SqlTypes.INTEGER)) .field("field3", SqlTypes.struct().build()) .build())); }
// Persists an API error log: maps the DTO, marks it INIT, truncates oversized request params,
// and inserts either within the current tenant context or ignoring tenant when none is bound.
@Override public void createApiErrorLog(ApiErrorLogCreateReqDTO createDTO) { ApiErrorLogDO apiErrorLog = BeanUtils.toBean(createDTO, ApiErrorLogDO.class) .setProcessStatus(ApiErrorLogProcessStatusEnum.INIT.getStatus()); apiErrorLog.setRequestParams(StrUtil.maxLength(apiErrorLog.getRequestParams(), REQUEST_PARAMS_MAX_LENGTH)); if (TenantContextHolder.getTenantId() != null) { apiErrorLogMapper.insert(apiErrorLog); } else { // 极端情况下,上下文中没有租户时,此时忽略租户上下文,避免插入失败! apiErrorLogMapper.insert in a tenant-ignoring scope to avoid insert failure when no tenant is bound. TenantUtils.executeIgnore(() -> apiErrorLogMapper.insert(apiErrorLog)); } }
// Verifies that a created error log round-trips the DTO fields and is stored with INIT status.
@Test public void testCreateApiErrorLog() { // 准备参数 ApiErrorLogCreateReqDTO createDTO = randomPojo(ApiErrorLogCreateReqDTO.class); // 调用 apiErrorLogService.createApiErrorLog(createDTO); // 断言 ApiErrorLogDO apiErrorLogDO = apiErrorLogMapper.selectOne(null); assertPojoEquals(createDTO, apiErrorLogDO); assertEquals(ApiErrorLogProcessStatusEnum.INIT.getStatus(), apiErrorLogDO.getProcessStatus()); }
/**
 * Splits {@code input} into key/value pairs and returns them as a map.
 *
 * <p>Returns {@code null} when any argument is null, either delimiter is empty, or the two
 * delimiters are equal. Entries lacking the key/value delimiter are silently ignored; on
 * duplicate keys the last occurrence wins.
 */
@Udf
public Map<String, String> splitToMap(
    @UdfParameter(
        description = "The string to split into key/value entries") final String input,
    @UdfParameter(
        description = "The delimiter separating one entry from the next") final String entryDelimiter,
    @UdfParameter(
        description = "The delimiter separating a key from its value within an entry") final String kvDelimiter) {
  if (input == null || entryDelimiter == null || kvDelimiter == null) {
    return null;
  }
  // Empty or identical delimiters make the split ambiguous; treat as unparseable.
  if (entryDelimiter.isEmpty() || kvDelimiter.isEmpty() || entryDelimiter.equals(kvDelimiter)) {
    return null;
  }
  // Empty entries (e.g. from leading/trailing entry delimiters) are dropped.
  final Iterable<String> entries = Splitter.on(entryDelimiter).omitEmptyStrings().split(input);
  return StreamSupport.stream(entries.spliterator(), false)
      .filter(e -> e.contains(kvDelimiter))
      // Take the first token as key and the second as value explicitly, instead of relying
      // on argument-evaluation order with two Iterator::next calls on a shared iterator.
      .map(kv -> {
        final Iterator<String> parts = Splitter.on(kvDelimiter).split(kv).iterator();
        return new String[] {parts.next(), parts.next()};
      })
      .collect(Collectors.toMap(
          parts -> parts[0],
          parts -> parts[1],
          (v1, v2) -> v2));
}
// Verifies that leading/trailing and doubled entry delimiters produce no empty map entries.
@Test public void shouldDropEmptyEntriesFromSplit() { Map<String, String> result = udf.splitToMap("/foo:=apple//bar:=cherry/", "/", ":="); assertThat(result, hasEntry("foo", "apple")); assertThat(result, hasEntry("bar", "cherry")); assertThat(result.size(), equalTo(2)); }
/**
 * Creates a ProgressLogger whose worker thread is named after {@code clazz} and whose
 * logger is the class's SLF4J logger.
 *
 * @param clazz   class used for the thread name and logger (wildcard instead of a raw type;
 *                erasure-identical, so source- and binary-compatible with existing callers)
 * @param counter counter whose value is periodically reported
 */
public static ProgressLogger create(Class<?> clazz, AtomicLong counter) {
  // e.g. "ProgressLogger[IssueLoader]"
  String threadName = String.format("ProgressLogger[%s]", clazz.getSimpleName());
  Logger logger = LoggerFactory.getLogger(clazz);
  return new ProgressLogger(threadName, counter, logger);
}
// Verifies the factory's default period/label and that both setters override them.
@Test public void create() { ProgressLogger progress = ProgressLogger.create(getClass(), new AtomicLong()); // default values assertThat(progress.getPeriodMs()).isEqualTo(60000L); assertThat(progress.getPluralLabel()).isEqualTo("rows"); // override values progress.setPeriodMs(10L); progress.setPluralLabel("issues"); assertThat(progress.getPeriodMs()).isEqualTo(10L); assertThat(progress.getPluralLabel()).isEqualTo("issues"); }
// Iterates all windowed entries across every underlying store for this store name,
// chaining the per-store all() iterators and wrapping them with peeking support.
@Override public KeyValueIterator<Windowed<K>, V> all() { final NextIteratorFunction<Windowed<K>, V, ReadOnlyWindowStore<K, V>> nextIteratorFunction = ReadOnlyWindowStore::all; return new DelegatingPeekingKeyValueIterator<>( storeName, new CompositeKeyValueIterator<>( provider.stores(storeName, windowStoreType).iterator(), nextIteratorFunction)); }
// Verifies that all() stitches together entries from two underlying stores in order.
@Test public void shouldGetAllAcrossStores() { final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE); stubProviderTwo.addStore(storeName, secondUnderlying); underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); final List<KeyValue<Windowed<String>, String>> results = StreamsTestUtils.toList(windowStore.all()); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); }
// Divides the full token ring into ~totalSplitCount proportional splits.
// For each adjacent token pair it computes a split count proportional to the range's share of the
// ring (ceiling division), generates evenly spaced endpoint tokens (wrapping past rangeMax and
// skipping the invalid Long.MIN_VALUE token — see CASSANDRA-14684), then validates that all
// resulting splits exactly cover the ring before coalescing them to the target size.
// Throws RuntimeException on out-of-range tokens, duplicate tokens, or incomplete coverage
// (which also catches out-of-order token input).
List<List<RingRange>> generateSplits(long totalSplitCount, List<BigInteger> ringTokens) { int tokenRangeCount = ringTokens.size(); List<RingRange> splits = new ArrayList<>(); for (int i = 0; i < tokenRangeCount; i++) { BigInteger start = ringTokens.get(i); BigInteger stop = ringTokens.get((i + 1) % tokenRangeCount); if (!isInRange(start) || !isInRange(stop)) { throw new RuntimeException( String.format("Tokens (%s,%s) not in range of %s", start, stop, partitioner)); } if (start.equals(stop) && tokenRangeCount != 1) { throw new RuntimeException( String.format("Tokens (%s,%s): two nodes have the same token", start, stop)); } BigInteger rs = stop.subtract(start); if (rs.compareTo(BigInteger.ZERO) <= 0) { // wrap around case rs = rs.add(rangeSize); } // the below, in essence, does this: // splitCount = ceiling((rangeSize / RANGE_SIZE) * totalSplitCount) BigInteger[] splitCountAndRemainder = rs.multiply(BigInteger.valueOf(totalSplitCount)).divideAndRemainder(rangeSize); int splitCount = splitCountAndRemainder[0].intValue() + (splitCountAndRemainder[1].equals(BigInteger.ZERO) ? 0 : 1); LOG.debug("Dividing token range [{},{}) into {} splits", start, stop, splitCount); // Make big0 list of all the endpoints for the splits, including both start and stop List<BigInteger> endpointTokens = new ArrayList<>(); for (int j = 0; j <= splitCount; j++) { BigInteger offset = rs.multiply(BigInteger.valueOf(j)).divide(BigInteger.valueOf(splitCount)); BigInteger token = start.add(offset); if (token.compareTo(rangeMax) > 0) { token = token.subtract(rangeSize); } // Long.MIN_VALUE is not a valid token and has to be silently incremented. // See https://issues.apache.org/jira/browse/CASSANDRA-14684 endpointTokens.add( token.equals(BigInteger.valueOf(Long.MIN_VALUE)) ? 
token.add(BigInteger.ONE) : token); } // Append the splits between the endpoints for (int j = 0; j < splitCount; j++) { splits.add(RingRange.of(endpointTokens.get(j), endpointTokens.get(j + 1))); LOG.debug("Split #{}: [{},{})", j + 1, endpointTokens.get(j), endpointTokens.get(j + 1)); } } BigInteger total = BigInteger.ZERO; for (RingRange split : splits) { BigInteger size = split.span(rangeSize); total = total.add(size); } if (!total.equals(rangeSize)) { throw new RuntimeException( "Some tokens are missing from the splits. " + "This should not happen."); } return coalesceSplits(getTargetSplitSize(totalSplitCount), splits); }
// Verifies that out-of-order ring tokens are rejected: the splits then fail to cover the
// whole ring and generateSplits throws RuntimeException.
@Test(expected = RuntimeException.class) public void testDisorderedRing() { List<String> tokenStrings = Arrays.asList( "0", "113427455640312821154458202477256070485", "1", "56713727820156410577229101238628035242", "56713727820156410577229101238628035243", "113427455640312821154458202477256070484"); List<BigInteger> tokens = tokenStrings.stream().map(BigInteger::new).collect(Collectors.toList()); SplitGenerator generator = new SplitGenerator("foo.bar.RandomPartitioner"); generator.generateSplits(10, tokens); // Will throw an exception when concluding that the repair segments don't add up. // This is because the tokens were supplied out of order. }
/**
 * "Smaller than" stream-rule matcher: matches when the message's field value is numerically
 * below the rule's value, with the result flipped when the rule is inverted.
 * Missing or non-numeric operands never match, inverted or not.
 */
@Override
public boolean match(Message msg, StreamRule rule) {
  final Double fieldValue = getDouble(msg.getField(rule.getField()));
  if (fieldValue == null) {
    return false;
  }
  final Double threshold = getDouble(rule.getValue());
  if (threshold == null) {
    return false;
  }
  final boolean smaller = fieldValue < threshold;
  // XOR with the inverted flag == "differs from" the inverted flag.
  return rule.getInverted() != smaller;
}
// Verifies the inverted rule does NOT match: 23 < 25 would match, so inversion makes it false.
@Test public void testMissedInvertedMatch() { StreamRule rule = getSampleRule(); rule.setValue("25"); rule.setInverted(true); Message msg = getSampleMessage(); msg.addField("something", "23"); StreamRuleMatcher matcher = getMatcher(rule); assertFalse(matcher.match(msg, rule)); }
// Parses a boolean expression over Predicate class names (operators &, |, !, parentheses)
// into a single composed Predicate, using a shunting-yard-style pair of stacks: operand
// tokens are instantiated reflectively onto predicateStack; operators are pushed/evaluated
// by precedence on operatorStack. isTokenMode=false replays a deferred operator after a
// higher-precedence evaluation. Throws RuntimeException on malformed input, classes that
// are not Predicates, or reflection failures; an unbalanced expression surfaces as
// EmptyStackException from the final evaluation loop.
public static Predicate parse(String expression) { final Stack<Predicate> predicateStack = new Stack<>(); final Stack<Character> operatorStack = new Stack<>(); final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll(""); final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true); boolean isTokenMode = true; while (true) { final Character operator; final String token; if (isTokenMode) { if (tokenizer.hasMoreTokens()) { token = tokenizer.nextToken(); } else { break; } if (OPERATORS.contains(token)) { operator = token.charAt(0); } else { operator = null; } } else { operator = operatorStack.pop(); token = null; } isTokenMode = true; if (operator == null) { try { predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance()); } catch (ClassCastException e) { throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e); } catch (Exception e) { throw new RuntimeException(e); } } else { if (operatorStack.empty() || operator == '(') { operatorStack.push(operator); } else if (operator == ')') { while (operatorStack.peek() != '(') { evaluate(predicateStack, operatorStack); } operatorStack.pop(); } else { if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) { evaluate(predicateStack, operatorStack); isTokenMode = false; } operatorStack.push(operator); } } } while (!operatorStack.empty()) { evaluate(predicateStack, operatorStack); } if (predicateStack.size() > 1) { throw new RuntimeException("Invalid logical expression"); } return predicateStack.pop(); }
// Verifies that a leading '&' with no left operand blows up with EmptyStackException.
@Test(expectedExceptions = EmptyStackException.class) public void testMissingOperand1And() { PredicateExpressionParser.parse("& com.linkedin.data.it.AlwaysFalsePredicate"); }
/**
 * Returns the content hash of the uploaded object, or empty when no hash has been
 * recorded yet (e.g. before the upload completed).
 */
@Override
public Optional<String> getContentHash() {
  if (mContentHash == null) {
    return Optional.empty();
  }
  return Optional.of(mContentHash);
}
// Verifies multipart-upload flow: writing just under two parts triggers init + one part upload,
// flush uploads the remainder and waits on tags, and close completes the upload, after which
// the content hash from the completed upload ("multiTag") is exposed.
@Test public void flush() throws Exception { int partSize = (int) FormatUtils.parseSpaceSize(PARTITION_SIZE); byte[] b = new byte[2 * partSize - 1]; mStream.write(b, 0, b.length); Mockito.verify(mMockOssClient) .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)); Mockito.verify(mMockOutputStream).write(b, 0, partSize); Mockito.verify(mMockOutputStream).write(b, partSize, partSize - 1); Mockito.verify(mMockExecutor).submit(any(Callable.class)); mStream.flush(); Mockito.verify(mMockExecutor, times(2)).submit(any(Callable.class)); Mockito.verify(mMockTag, times(2)).get(); mStream.close(); Mockito.verify(mMockOssClient) .completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); assertTrue(mStream.getContentHash().isPresent()); assertEquals("multiTag", mStream.getContentHash().get()); }
// Resolves (and caches) the best-matching constructor invoker for the given parameter types.
// Cache hit: return directly. Miss: among constructors with a matching arity, pick the single
// candidate or the cheapest-cost match; only cacheable (cost != -1) invokers are stored.
// Throws Py4JException when no acceptable constructor exists.
public MethodInvoker getConstructor(Class<?> clazz, Class<?>[] parameters) { MethodDescriptor mDescriptor = new MethodDescriptor(clazz.getName(), clazz, parameters); MethodInvoker mInvoker = null; List<Constructor<?>> acceptableConstructors = null; LRUCache<MethodDescriptor, MethodInvoker> cache = cacheHolder.get(); mInvoker = cache.get(mDescriptor); if (mInvoker == null) { acceptableConstructors = getConstructorsByLength(clazz, parameters.length); if (acceptableConstructors.size() == 1) { mInvoker = MethodInvoker.buildInvoker(acceptableConstructors.get(0), parameters); } else { mInvoker = getBestConstructor(acceptableConstructors, parameters); } if (mInvoker != null && mInvoker.getCost() != -1) { cache.put(mDescriptor, mInvoker); } else { String errorMessage = "Constructor " + clazz.getName() + "(" + Arrays.toString(parameters) + ") does not exist"; logger.log(Level.WARNING, errorMessage); throw new Py4JException(errorMessage); } } return mInvoker; }
// Exercises constructor resolution: cache hits, single-candidate arity, cost-based overload
// selection (including primitive widening/char/boolean cases), and actual invocation.
@Test public void testGetConstructor() { ReflectionEngine engine = new ReflectionEngine(); MethodInvoker mInvoker = engine.getConstructor("p1.Cat", new Object[] {}); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] {}); // Test cache: mInvoker = engine.getConstructor("p1.Cat", new Object[] {}); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] {}); // Test one only mInvoker = engine.getConstructor("p1.Cat", new Object[] { new Integer(2) }); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] { int.class }); // Test cost computation mInvoker = engine.getConstructor("p1.Cat", new Object[] { new ArrayList<String>(), new String() }); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] { Object.class, String.class }); mInvoker = engine.getConstructor("p1.Cat", new Object[] { "", new String() }); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] { String.class, String.class }); mInvoker = engine.getConstructor("p1.Cat", new Object[] { "a", 2 }); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] { char.class, int.class }); mInvoker = engine.getConstructor("p1.Cat", new Object[] { true, 2 }); assertArrayEquals(mInvoker.getConstructor().getParameterTypes(), new Class[] { boolean.class, short.class }); // Test invokation mInvoker = engine.getConstructor("p1.Cat", new Object[] { "a", 2 }); Object obj = mInvoker.invoke(null, new Object[] { "a", 2 }); assertTrue(obj instanceof Cat); }
// Delegates to the underlying reader after lazy initialization; the iterator is NOT cached,
// so each call returns a fresh CloseableIterator the caller must close.
@Override public CloseableIterator<ScannerReport.Measure> readComponentMeasures(int componentRef) { ensureInitialized(); return delegate.readComponentMeasures(componentRef); }
// Verifies that consecutive calls return distinct iterator instances (no caching).
@Test public void readComponentMeasures_is_not_cached() { writer.appendComponentMeasure(COMPONENT_REF, MEASURE); assertThat(underTest.readComponentMeasures(COMPONENT_REF)).isNotSameAs(underTest.readComponentMeasures(COMPONENT_REF)); }
// Convenience overload: allocates a counter with the given label and the default type id.
public int allocate(final String label) { return allocate(label, DEFAULT_TYPE_ID); }
// Verifies that allocate(label, typeId, keyWriter) stores the type id, label and key buffer,
// and that forEach replays the metadata in allocation order.
@Test void shouldStoreMetaData() { final int typeIdOne = 333; final long keyOne = 777L; final int typeIdTwo = 222; final long keyTwo = 444; final int counterIdOne = manager.allocate("Test Label One", typeIdOne, (buffer) -> buffer.putLong(0, keyOne)); final int counterIdTwo = manager.allocate("Test Label Two", typeIdTwo, (buffer) -> buffer.putLong(0, keyTwo)); manager.forEach(metaData); final ArgumentCaptor<DirectBuffer> argCaptorOne = ArgumentCaptor.forClass(DirectBuffer.class); final ArgumentCaptor<DirectBuffer> argCaptorTwo = ArgumentCaptor.forClass(DirectBuffer.class); final InOrder inOrder = Mockito.inOrder(metaData); inOrder.verify(metaData).accept(eq(counterIdOne), eq(typeIdOne), argCaptorOne.capture(), eq("Test Label One")); inOrder.verify(metaData).accept(eq(counterIdTwo), eq(typeIdTwo), argCaptorTwo.capture(), eq("Test Label Two")); inOrder.verifyNoMoreInteractions(); final DirectBuffer keyOneBuffer = argCaptorOne.getValue(); assertEquals(keyOne, keyOneBuffer.getLong(0)); final DirectBuffer keyTwoBuffer = argCaptorTwo.getValue(); assertEquals(keyTwo, keyTwoBuffer.getLong(0)); assertEquals(typeIdOne, manager.getCounterTypeId(counterIdOne)); assertEquals(typeIdTwo, manager.getCounterTypeId(counterIdTwo)); }
// Dispatches a disconnect notification asynchronously to every registered
// InterceptDisconnectMessage handler via the executor.
@Override public void notifyClientDisconnected(final String clientID, final String username) { for (final InterceptHandler handler : this.handlers.get(InterceptDisconnectMessage.class)) { LOG.debug("Notifying MQTT client disconnection to interceptor. CId={}, username={}, interceptorId={}", clientID, username, handler.getID()); executor.execute(() -> handler.onDisconnect(new InterceptDisconnectMessage(clientID, username))); } }
// Verifies all handlers ran: after the async interval the shared counter reaches 50.
@Test public void testNotifyClientDisconnected() throws Exception { interceptor.notifyClientDisconnected("cli1234", "cli1234"); interval(); assertEquals(50, n.get()); }
// Jackson creator: builds a ModelVersion from its string form, rejecting blank input.
@JsonCreator public static ModelVersion of(String version) { Preconditions.checkArgument(StringUtils.isNotBlank(version), "Version must not be blank"); return new AutoValue_ModelVersion(version); }
// Verifies that a ModelVersion serializes to a plain JSON string of its version value.
@Test public void deserialize() { final ModelVersion modelVersion = ModelVersion.of("foobar"); final JsonNode jsonNode = objectMapper.convertValue(modelVersion, JsonNode.class); assertThat(jsonNode.isTextual()).isTrue(); assertThat(jsonNode.asText()).isEqualTo("foobar"); }
// Exposes JVM runtime gauges: process name, "vendor vmName vmVersion (specVersion)" string,
// and uptime in millis; returned as an unmodifiable map.
@Override public Map<String, Metric> getMetrics() { final Map<String, Metric> gauges = new HashMap<>(); gauges.put("name", (Gauge<String>) runtime::getName); gauges.put("vendor", (Gauge<String>) () -> String.format(Locale.US, "%s %s %s (%s)", runtime.getVmVendor(), runtime.getVmName(), runtime.getVmVersion(), runtime.getSpecVersion())); gauges.put("uptime", (Gauge<Long>) runtime::getUptime); return Collections.unmodifiableMap(gauges); }
// Verifies the "name" gauge reflects the (mocked) runtime name.
@Test public void hasAGaugeForTheJVMName() { final Gauge<String> gauge = (Gauge<String>) gauges.getMetrics().get("name"); assertThat(gauge.getValue()) .isEqualTo("9928@example.com"); }
// Static factory: starts a builder for a context-aware scheduled thread pool.
public static Builder newScheduledThreadPool() { return new Builder(); }
// Verifies the builder rejects a core pool size below one.
@Test public void throwsExceptionWhenCorePoolSizeLessThanOne() { assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> ContextAwareScheduledThreadPoolExecutor .newScheduledThreadPool() .corePoolSize(0) .build()); }
// Starts scheduling: arms the idle-slot timeout check, then transitions out of the Created
// state; calling this in any other state fails fast with IllegalStateException.
@Override public void startScheduling() { checkIdleSlotTimeout(); state.as(Created.class) .orElseThrow( () -> new IllegalStateException( "Can only start scheduling when being in Created state.")) .startScheduling(); }
// Verifies the numRestarts gauge: 0 after the first (single-slot) deployment, then 1 after
// offering more slots triggers a scale-up restart of the adaptive scheduler.
@Test void testNumRestartsMetric() throws Exception { final CompletableFuture<Gauge<Long>> numRestartsMetricFuture = new CompletableFuture<>(); final MetricRegistry metricRegistry = TestingMetricRegistry.builder() .setRegisterConsumer( (metric, name, group) -> { if (MetricNames.NUM_RESTARTS.equals(name)) { numRestartsMetricFuture.complete((Gauge<Long>) metric); } }) .build(); final JobGraph jobGraph = createJobGraph(); final DefaultDeclarativeSlotPool declarativeSlotPool = new DefaultDeclarativeSlotPool( jobGraph.getJobID(), new DefaultAllocatedSlotPool(), ignored -> {}, Duration.ofMinutes(10), Duration.ofMinutes(10), Duration.ZERO, mainThreadExecutor); final Configuration configuration = createConfigurationWithNoTimeouts(); configuration.set(JobManagerOptions.MIN_PARALLELISM_INCREASE, 1); final AdaptiveScheduler scheduler = new AdaptiveSchedulerBuilder( jobGraph, singleThreadMainThreadExecutor, EXECUTOR_RESOURCE.getExecutor()) .setJobMasterConfiguration(configuration) .setJobManagerJobMetricGroup( JobManagerMetricGroup.createJobManagerMetricGroup( metricRegistry, "localhost") .addJob(new JobID(), "jobName")) .setDeclarativeSlotPool(declarativeSlotPool) .build(); final Gauge<Long> numRestartsMetric = numRestartsMetricFuture.get(); final SubmissionBufferingTaskManagerGateway taskManagerGateway = new SubmissionBufferingTaskManagerGateway(1 + PARALLELISM); taskManagerGateway.setCancelConsumer(createCancelConsumer(scheduler)); singleThreadMainThreadExecutor.execute( () -> { scheduler.startScheduling(); declarativeSlotPool.offerSlots( createSlotOffersForResourceRequirements( ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1)), new LocalTaskManagerLocation(), taskManagerGateway, System.currentTimeMillis()); }); // wait for the first task submission taskManagerGateway.waitForSubmissions(1); assertThat(numRestartsMetric.getValue()).isEqualTo(0L); singleThreadMainThreadExecutor.execute( () -> { // offer more slots, which will cause a restart in order to scale up 
offerSlots( declarativeSlotPool, createSlotOffersForResourceRequirements( ResourceCounter.withResource( ResourceProfile.UNKNOWN, PARALLELISM)), taskManagerGateway); }); // wait for the second task submissions taskManagerGateway.waitForSubmissions(PARALLELISM); assertThat(numRestartsMetric.getValue()).isEqualTo(1L); }
// Shuts down the invocation service: flags shutdown, stops the response handler, then fails
// every pending invocation with HazelcastClientNotActiveException (connection manager and
// response handler threads are already closed at this point).
public void shutdown() { isShutdown = true; responseHandlerSupplier.shutdown(); for (ClientInvocation invocation : invocations.values()) { //connection manager and response handler threads are closed at this point. invocation.notifyExceptionWithOwnedPermission(new HazelcastClientNotActiveException()); } }
// Verifies that while disconnected with pending compact schemas, urgent invocations carrying
// no data are written to the connection but urgent invocations carrying data are held back.
@Test public void testInvokeUrgent_whenThereAreCompactSchemas_andClientIsNotInitializedOnCluster() { client.getMap("testMap").put("test", new EmployeeDTO()); UUID memberUuid = member.getLocalEndpoint().getUuid(); member.shutdown(); makeSureDisconnectedFromServer(client, memberUuid); // Some compact schemas, need to check urgent invocations assertTrue(client.shouldCheckUrgentInvocations()); // client is disconnected, so not initialized on cluster assertTrueEventually(() -> assertFalse(client.getConnectionManager().clientInitializedOnCluster())); ClientConnection connection = mockConnection(); // Urgent invocations should be done, if they contain no data ClientInvocation pingInvocation = checkUrgentInvocation_withNoData(connection); ClientInvocation setAddInvocation = checkUrgentInvocation_withData(connection); verify(connection, times(1)).write(pingInvocation.getClientMessage()); verify(connection, never()).write(setAddInvocation.getClientMessage()); }
// Camel header-dispatched operation: queries the uncle count at the block given by the
// AT_BLOCK header (falling back to the configured default) and, if the node reported no
// error, sets the count as the message body.
@InvokeOnHeader(Web3jConstants.ETH_GET_UNCLE_COUNT_BY_BLOCK_NUMBER) void ethGetUncleCountByBlockNumber(Message message) throws IOException { DefaultBlockParameter atBlock = toDefaultBlockParameter(message.getHeader(Web3jConstants.AT_BLOCK, configuration::getAtBlock, String.class)); Request<?, EthGetUncleCountByBlockNumber> request = web3j.ethGetUncleCountByBlockNumber(atBlock); setRequestId(message, request); EthGetUncleCountByBlockNumber response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getUncleCount()); } }
// Verifies the operation copies the mocked uncle count (ONE) into the exchange body.
@Test public void ethGetUncleCountByBlockNumberTest() throws Exception { EthGetUncleCountByBlockNumber response = Mockito.mock(EthGetUncleCountByBlockNumber.class); Mockito.when(mockWeb3j.ethGetUncleCountByBlockNumber(any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getUncleCount()).thenReturn(BigInteger.ONE); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_UNCLE_COUNT_BY_BLOCK_NUMBER); template.send(exchange); BigInteger body = exchange.getIn().getBody(BigInteger.class); assertEquals(BigInteger.ONE, body); }
// Builds a TCC proxy handler for the target bean: returns null for beans with no two-phase
// (TCC) annotated methods; otherwise registers the bean as a TCC resource and wraps the
// annotated methods with the TCC action interceptor.
@Override public ProxyInvocationHandler parserInterfaceToProxy(Object target, String objectName) { // eliminate the bean without two phase annotation. Set<String> methodsToProxy = this.tccProxyTargetMethod(target); if (methodsToProxy.isEmpty()) { return null; } // register resource and enhance with interceptor DefaultResourceRegisterParser.get().registerResource(target, objectName); return new TccActionInterceptorHandler(target, methodsToProxy); }
// Verifies nested TCC with REQUIRES_NEW: the outer transaction rolls back (prepare returned
// false) while the inner REQUIRES_NEW action still commits — both actions end up committed
// from their own transactions' perspectives.
@Test public void testNestTcc_required_new_should_rollback_commit() throws Exception { //given RootContext.unbind(); DefaultResourceManager.get(); DefaultResourceManager.mockResourceManager(BranchType.TCC, resourceManager); TransactionManagerHolder.set(transactionManager); TccActionImpl tccAction = new TccActionImpl(); TccAction tccActionProxy = ProxyUtil.createProxy(tccAction); Assertions.assertNotNull(tccActionProxy); NestTccActionImpl nestTccAction = new NestTccActionImpl(); nestTccAction.setTccAction(tccActionProxy); //when ProxyInvocationHandler proxyInvocationHandler = DefaultInterfaceParser.get().parserInterfaceToProxy(nestTccAction, nestTccAction.getClass().getName()); //then Assertions.assertNotNull(proxyInvocationHandler); //when NestTccAction nestTccActionProxy = ProxyUtil.createProxy(nestTccAction); //then Assertions.assertNotNull(nestTccActionProxy); // transaction commit test GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate(); try { tx.begin(60000, "testBiz"); boolean result = nestTccActionProxy.prepareNestRequiredNew(null, 1); Assertions.assertFalse(result); if (result) { tx.commit(); } else { tx.rollback(); } } catch (Exception exx) { tx.rollback(); throw exx; } Assertions.assertTrue(nestTccAction.isCommit()); Assertions.assertTrue(tccAction.isCommit()); }
// JDBC entry point: delegates to the Calcite driver, then initializes the resulting
// connection as a JdbcConnection (populating the Beam schema, pipeline options, table providers).
@Override public Connection connect(String url, Properties info) throws SQLException { // calciteConnection is initialized with an empty Beam schema, // we need to populate it with pipeline options, load table providers, etc return JdbcConnection.initialize((CalciteConnection) super.connect(url, info)); }
// Verifies that a connection accepts "SET runner = direct" and can then query the test table.
@Test public void testInternalConnect_setDirectRunner() throws Exception { CalciteConnection connection = JdbcDriver.connect(BOUNDED_TABLE, PipelineOptionsFactory.create()); Statement statement = connection.createStatement(); assertEquals(0, statement.executeUpdate("SET runner = direct")); assertTrue(statement.execute("SELECT * FROM test")); }
// Extracts the induced subgraph on the given vertices: sorts a copy of the vertex ids so
// binarySearch can map original ids to new compact indices, then re-adds every edge whose
// other endpoint is also in the set, preserving weights and the digraph flag.
// NOTE(review): for undirected graphs each kept edge is visited from both endpoints, so
// addEdge is called twice per edge — presumably idempotent in AdjacencyList; confirm.
@Override public AdjacencyList subgraph(int[] vertices) { int[] v = vertices.clone(); Arrays.sort(v); AdjacencyList g = new AdjacencyList(v.length, digraph); for (int i = 0; i < v.length; i++) { Collection<Edge> edges = getEdges(v[i]); for (Edge edge : edges) { int j = edge.v1 == v[i] ? edge.v2 : edge.v1; j = Arrays.binarySearch(v, j); if (j >= 0) { g.addEdge(i, j, edge.weight); } } } return g; }
// Verifies the induced subgraph on {1, 3, 7}: only edge (1,7) survives, symmetric since
// the graph is undirected.
@Test public void testSubgraph() { System.out.println("subgraph digraph = false"); AdjacencyList graph = new AdjacencyList(8, false); graph.addEdge(0, 2); graph.addEdge(1, 7); graph.addEdge(2, 6); graph.addEdge(7, 4); graph.addEdge(3, 4); graph.addEdge(3, 5); graph.addEdge(5, 4); int[] v = {1, 3, 7}; AdjacencyList sub = graph.subgraph(v); for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { System.out.print(sub.getWeight(i, j) + " "); } System.out.println(); } assertEquals(1.0, sub.getWeight(0, 2), 1E-10); assertEquals(1.0, sub.getWeight(2, 0), 1E-10); }
// Converts one change stream result row into records, dialect-dependently:
// PostgreSQL returns a single JSONB payload per row; GoogleSQL returns an array of structs,
// each of which may expand into multiple records.
public List<ChangeStreamRecord> toChangeStreamRecords( PartitionMetadata partition, ChangeStreamResultSet resultSet, ChangeStreamResultSetMetadata resultSetMetadata) { if (this.isPostgres()) { // In PostgresQL, change stream records are returned as JsonB. return Collections.singletonList( toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata)); } // In GoogleSQL, change stream records are returned as an array of structs. return resultSet.getCurrentRowAsStruct().getStructList(0).stream() .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata)) .collect(Collectors.toList()); }
// Verifies the Postgres mapper round-trips a DELETE/NEW_ROW DataChangeRecord through its
// JSON form: serializing the record and feeding it back via getPgJsonb yields an equal record.
@Test public void testMappingDeleteJsonRowNewRowToDataChangeRecord() { final DataChangeRecord dataChangeRecord = new DataChangeRecord( "partitionToken", Timestamp.ofTimeSecondsAndNanos(10L, 20), "transactionId", false, "1", "tableName", Arrays.asList( new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L), new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)), Collections.singletonList(new Mod("{\"column1\":\"value1\"}", null, null)), ModType.DELETE, ValueCaptureType.NEW_ROW, 10L, 2L, "transactionTag", true, null); final String jsonString = recordToJson(dataChangeRecord, false, false); assertNotNull(jsonString); ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class); when(resultSet.getPgJsonb(0)).thenReturn(jsonString); assertEquals( Collections.singletonList(dataChangeRecord), mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata)); }
/**
 * Computes the container entrypoint for an exploded standard JAR:
 * {@code java [jvmFlags...] -cp <explodedJar>:<dependencies>/* <mainClass>}.
 *
 * @param jvmFlags extra JVM flags inserted before {@code -cp}
 * @throws IOException if the JAR cannot be read
 * @throws IllegalArgumentException if the JAR has no manifest or no {@code Main-Class} attribute
 */
@Override
public ImmutableList<String> computeEntrypoint(List<String> jvmFlags) throws IOException {
  try (JarFile jarFile = new JarFile(jarPath.toFile())) {
    // JarFile.getManifest() returns null when META-INF/MANIFEST.MF is absent; the original
    // code dereferenced it unconditionally and would throw an opaque NullPointerException.
    if (jarFile.getManifest() == null) {
      throw new IllegalArgumentException(
          "The input JAR does not contain a manifest (`META-INF/MANIFEST.MF` in the JAR), so "
              + "no `Main-Class:` attribute for an application main class can be found.");
    }
    String mainClass =
        jarFile.getManifest().getMainAttributes().getValue(Attributes.Name.MAIN_CLASS);
    if (mainClass == null) {
      throw new IllegalArgumentException(
          "`Main-Class:` attribute for an application main class not defined in the input JAR's "
              + "manifest (`META-INF/MANIFEST.MF` in the JAR).");
    }
    // Exploded classes first, then all dependency JARs.
    String classpath = JarLayers.APP_ROOT + "/explodedJar:" + JarLayers.APP_ROOT + "/dependencies/*";
    ImmutableList.Builder<String> entrypoint = ImmutableList.builder();
    entrypoint.add("java");
    entrypoint.addAll(jvmFlags);
    entrypoint.add("-cp");
    entrypoint.add(classpath);
    entrypoint.add(mainClass);
    return entrypoint.build();
  }
}
// Verifies the entrypoint for a JAR whose manifest declares Main-Class: HelloWorld.
@Test public void testComputeEntrypoint_withMainClass() throws IOException, URISyntaxException { Path standardJar = Paths.get(Resources.getResource(STANDARD_JAR_WITH_CLASS_PATH_MANIFEST).toURI()); StandardExplodedProcessor standardExplodedModeProcessor = new StandardExplodedProcessor(standardJar, Paths.get("ignore"), JAR_JAVA_VERSION); ImmutableList<String> actualEntrypoint = standardExplodedModeProcessor.computeEntrypoint(ImmutableList.of()); assertThat(actualEntrypoint) .isEqualTo( ImmutableList.of("java", "-cp", "/app/explodedJar:/app/dependencies/*", "HelloWorld")); }
// Returns a token handle bound to (userId, operation). generate() stores a weakly-referenced
// token info with the given timeout; expired() reports true when the info is missing or timed
// out (removing it in the latter case), and otherwise refreshes lastRequestTime so the token's
// lifetime is extended on each successful check (sliding expiration).
@Override public TwoFactorToken getToken(String userId, String operation) { return new TwoFactorToken() { private static final long serialVersionUID = -5148037320548431456L; @Override public void generate(long timeout) { TwoFactorTokenInfo info = new TwoFactorTokenInfo(); info.timeOut = timeout; tokens.put(createTokenInfoKey(userId, operation), new WeakReference<>(info)); } @Override public boolean expired() { TwoFactorTokenInfo info = getTokenInfo(userId, operation); if (info == null) { return true; } if (info.isExpire()) { tokens.remove(createTokenInfoKey(userId, operation)); return true; } info.lastRequestTime = System.currentTimeMillis(); return false; } }; }
// Verifies the token lifecycle: expired before generation, valid within the 1s timeout,
// expired again after the timeout elapses.
@Test @SneakyThrows public void test() { TwoFactorToken twoFactorToken = tokenManager.getToken("test", "test"); Assert.assertTrue(twoFactorToken.expired()); twoFactorToken.generate(1000L); Assert.assertFalse(twoFactorToken.expired()); Thread.sleep(1100); Assert.assertTrue(twoFactorToken.expired()); }
// Returns the analyzer's display name constant.
@Override public String getName() { return ANALYZER_NAME; }
// Verifies the analyzer reports its expected display name.
@Test public void testGetName() { FileNameAnalyzer instance = new FileNameAnalyzer(); String expResult = "File Name Analyzer"; String result = instance.getName(); assertEquals(expResult, result); }
/**
 * Parses a single CSV cell into the Java value matching the field's schema type.
 *
 * @param cell  raw cell text
 * @param field schema field supplying the target type and the name used in error messages
 * @return the parsed value (String, Short, Integer, Long, Boolean, Byte, BigDecimal, Double,
 *     Float, or Instant depending on the field type)
 * @throws UnsupportedOperationException for types without built-in parsing
 * @throws IllegalArgumentException when the cell text does not parse as the field's type
 */
static Object parseCell(String cell, Schema.Field field) {
  Schema.FieldType fieldType = field.getType();
  try {
    switch (fieldType.getTypeName()) {
      case STRING:
        return cell;
      case INT16:
        return Short.parseShort(cell);
      case INT32:
        return Integer.parseInt(cell);
      case INT64:
        return Long.parseLong(cell);
      case BOOLEAN:
        return Boolean.parseBoolean(cell);
      case BYTE:
        return Byte.parseByte(cell);
      case DECIMAL:
        return new BigDecimal(cell);
      case DOUBLE:
        return Double.parseDouble(cell);
      case FLOAT:
        return Float.parseFloat(cell);
      case DATETIME:
        return Instant.parse(cell);
      default:
        throw new UnsupportedOperationException(
            "Unsupported type: " + fieldType + ", consider using withCustomRecordParsing");
    }
  } catch (IllegalArgumentException e) {
    // Rewrap with the field name for context, preserving the original exception as the
    // cause instead of discarding it (the previous code dropped the cause entirely).
    throw new IllegalArgumentException(
        e.getMessage() + " field " + field.getName() + " was received -- type mismatch", e);
  }
}
// Verifies that a datetime cell with surrounding whitespace fails with the field-annotated
// "type mismatch" message (no implicit trimming).
@Test public void givenDatetimeWithSurroundingSpaces() { Instant datetime = Instant.parse("1234-01-23T10:00:05.000Z"); DefaultMapEntry cellToExpectedValue = new DefaultMapEntry(" 1234-01-23T10:00:05.000Z ", datetime); Schema schema = Schema.builder().addDateTimeField("a_datetime").addStringField("a_string").build(); IllegalArgumentException e = assertThrows( IllegalArgumentException.class, () -> CsvIOParseHelpers.parseCell( cellToExpectedValue.getKey().toString(), schema.getField("a_datetime"))); assertEquals( "Invalid format: \" 1234-01-23T10:00:05.000Z \" field a_datetime was received -- type mismatch", e.getMessage()); }
// Fluent setter: assigns the index type, rejecting null, and returns this for chaining.
public IndexConfig setType(IndexType type) { this.type = checkNotNull(type, "Index type cannot be null."); return this; }
// Verifies that setType(null) is rejected with NullPointerException.
@Test(expected = NullPointerException.class) public void testTypeNull() { new IndexConfig().setType(null); }