Dataset schema: two string columns per row.
- focal_method: string (length 13 – 60.9k characters)
- test_case: string (length 25 – 109k characters)
/**
 * Flattens the given HTTP headers into a single-valued map (multi-valued
 * headers are joined with commas) and serializes it to a JSON string.
 *
 * @param headers the request/response headers to serialize
 * @return JSON representation of the flattened header map
 */
public static String getHeaders(final HttpHeaders headers) {
    final Map<String, String> flattened = new HashMap<>();
    // entrySet keys are unique, so a plain put per entry is safe.
    headers.forEach((name, values) -> flattened.put(name, String.join(",", values)));
    return JsonUtils.toJson(flattened);
}
// Verifies that getHeaders serializes the mock request's headers to the expected JSON.
// NOTE(review): assertEquals(actual, expected) is reversed from the JUnit convention
// (expected first); behavior is identical but failure messages will be misleading.
@Test
public void testGetHeaders() {
    assertEquals(LogCollectUtils.getHeaders(request.getHeaders()), "{\"X-source\":\"mock test\"}");
}
/**
 * Checks whether a client with the given id is currently registered.
 *
 * @param clientId the client identifier to look up
 * @return {@code true} if the id is a key in the client registry
 */
@Override
public boolean contains(String clientId) {
    return clients.containsKey(clientId);
}
// Verifies contains(): a registered client id is found, an unregistered one is not.
@Test
void testContains() {
    assertTrue(persistentIpPortClientManager.contains(clientId));
    // Id formatted like a real client id but never registered.
    String unUsedClientId = "127.0.0.1:8888#true";
    assertFalse(persistentIpPortClientManager.contains(unUsedClientId));
}
/**
 * Returns the number of buffers currently queued in this subpartition.
 * NOTE(review): whether events are counted alongside data buffers cannot be
 * determined from this declaration — confirm in concrete implementations.
 */
public abstract int getNumberOfQueuedBuffers();
// Verifies that broadcasting a partial record followed by a full-buffer record leaves
// every subpartition with two queued buffers: the first with no partial-record carry-over,
// the second carrying partialLength bytes of the spanning record.
@Test
void testBroadcastRecordWithRecordSpanningMultipleBuffers() throws Exception {
    BufferWritingResultPartition bufferWritingResultPartition =
        createResultPartition(ResultPartitionType.PIPELINED);
    int partialLength = bufferSize / 3;
    try {
        // emit the first record, record length = partialLength
        bufferWritingResultPartition.broadcastRecord(ByteBuffer.allocate(partialLength));
        // emit the second record, record length = bufferSize
        bufferWritingResultPartition.broadcastRecord(ByteBuffer.allocate(bufferSize));
    } finally {
        // Assertions run in finally so the partition state is checked even on failure paths.
        for (ResultSubpartition resultSubpartition : bufferWritingResultPartition.subpartitions) {
            PipelinedSubpartition pipelinedSubpartition = (PipelinedSubpartition) resultSubpartition;
            assertThat(pipelinedSubpartition.getNumberOfQueuedBuffers()).isEqualTo(2);
            // First buffer starts a record at offset 0; second continues the spanning record.
            assertThat(pipelinedSubpartition.getNextBuffer().getPartialRecordLength()).isZero();
            assertThat(pipelinedSubpartition.getNextBuffer().getPartialRecordLength())
                .isEqualTo(partialLength);
        }
    }
}
/**
 * Creates a point-in-time snapshot of this serializer's configuration, capturing
 * the serialized class, its fields and field serializers, registered subclasses
 * with their serializers, the subclass serializer cache, and the serializer config.
 *
 * @return a snapshot that can later be used for schema-compatibility checks
 */
@Override
public PojoSerializerSnapshot<T> snapshotConfiguration() {
    return buildSnapshot(
        clazz,
        registeredClasses,
        registeredSerializers,
        fields,
        fieldSerializers,
        subclassSerializerCache,
        serializerConfig);
}
// Verifies that a serializer snapshot taken for one POJO type (SubTestUserClassB),
// round-tripped through serialization, is reported as incompatible when resolved
// against a serializer for a different POJO type (SubTestUserClassA).
@Test
void testReconfigureWithDifferentPojoType() throws Exception {
    PojoSerializer<SubTestUserClassB> pojoSerializer1 =
        (PojoSerializer<SubTestUserClassB>)
            TypeExtractor.getForClass(SubTestUserClassB.class)
                .createSerializer(new SerializerConfigImpl());
    // snapshot configuration and serialize to bytes
    TypeSerializerSnapshot pojoSerializerConfigSnapshot = pojoSerializer1.snapshotConfiguration();
    byte[] serializedConfig;
    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
            new DataOutputViewStreamWrapper(out), pojoSerializerConfigSnapshot);
        serializedConfig = out.toByteArray();
    }
    PojoSerializer<SubTestUserClassA> pojoSerializer2 =
        (PojoSerializer<SubTestUserClassA>)
            TypeExtractor.getForClass(SubTestUserClassA.class)
                .createSerializer(new SerializerConfigImpl());
    // read configuration again from bytes
    try (ByteArrayInputStream in = new ByteArrayInputStream(serializedConfig)) {
        pojoSerializerConfigSnapshot =
            TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
                new DataInputViewStreamWrapper(in), Thread.currentThread().getContextClassLoader());
    }
    // Resolving the B-snapshot against the A-serializer must flag incompatibility.
    @SuppressWarnings("unchecked")
    TypeSerializerSchemaCompatibility<SubTestUserClassA> compatResult =
        pojoSerializer2
            .snapshotConfiguration()
            .resolveSchemaCompatibility(pojoSerializerConfigSnapshot);
    assertThat(compatResult.isIncompatible()).isTrue();
}
/**
 * Exposes JVM runtime information (VM name, vendor string, uptime) as gauges.
 *
 * @return an unmodifiable map of metric name to gauge
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> metrics = new HashMap<>();
    metrics.put("name", (Gauge<String>) runtime::getName);
    metrics.put("vendor", (Gauge<String>) () -> {
        // Locale.US keeps the formatted vendor string stable across default locales.
        return String.format(Locale.US,
                "%s %s %s (%s)",
                runtime.getVmVendor(),
                runtime.getVmName(),
                runtime.getVmVersion(),
                runtime.getSpecVersion());
    });
    metrics.put("uptime", (Gauge<Long>) runtime::getUptime);
    return Collections.unmodifiableMap(metrics);
}
// Verifies the "uptime" gauge delegates to the (mocked) runtime, which reports 100 ms.
@Test
public void hasAGaugeForTheJVMUptime() {
    final Gauge<Long> gauge = (Gauge<Long>) gauges.getMetrics().get("uptime");
    assertThat(gauge.getValue())
        .isEqualTo(100L);
}
/**
 * Creates an Elasticsearch {@link DataSink} from the factory context: validates the
 * factory configuration, checks required options, resolves the session time zone,
 * and builds the sink connector options.
 *
 * @param context factory context carrying the user-supplied configuration
 * @return a configured ElasticsearchDataSink
 */
@Override
public DataSink createDataSink(Context context) {
    // Validate the configuration
    FactoryHelper.createFactoryHelper(this, context).validate();
    // Get the configuration directly from the context
    Configuration configuration = Configuration.fromMap(context.getFactoryConfiguration().toMap());
    // Validate required options
    validateRequiredOptions(configuration);
    ZoneId zoneId = determineZoneId(context);
    ElasticsearchSinkOptions sinkOptions = buildSinkConnectorOptions(configuration);
    return new ElasticsearchDataSink(sinkOptions, zoneId);
}
// For each required option, builds a valid configuration with that single option
// removed and verifies sink creation fails with a ValidationException naming it.
@Test
void testLackRequiredOption() {
    DataSinkFactory sinkFactory = getElasticsearchDataSinkFactory();
    List<String> requiredKeys = getRequiredKeys(sinkFactory);
    for (String requiredKey : requiredKeys) {
        // Start from a fresh map containing every required option
        Map<String, String> options = new HashMap<>(createValidOptions());
        // Remove only the required option currently under test
        options.remove(requiredKey);
        Configuration conf = Configuration.fromMap(options);
        // Log which required option is being omitted in this iteration
        System.out.println("Testing missing required option: " + requiredKey);
        // Creating the sink must fail validation because of the missing option
        Assertions.assertThatThrownBy(() -> createDataSink(sinkFactory, conf))
            // Assertions to check for missing required option
            .isInstanceOf(ValidationException.class)
            .hasMessageContaining(
                String.format(
                    "One or more required options are missing.\n\n"
                        + "Missing required options are:\n\n"
                        + "%s",
                    requiredKey));
    }
}
/**
 * Creates an exit-status tracker bound to the given runtime options.
 * NOTE(review): {@code options} is stored without a null check — presumably
 * callers guarantee non-null; confirm before adding validation.
 *
 * @param options the runtime options consulted when computing the exit code
 */
public ExitStatus(Options options) {
    this.options = options;
}
// In work-in-progress mode a FAILED scenario yields exit code 0 — presumably
// --wip inverts pass/fail semantics (failures are expected); confirm against
// the ExitStatus implementation.
@Test
void wip_with_failed_scenarios() {
    createWipRuntime();
    bus.send(testCaseFinishedWithStatus(Status.FAILED));
    assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x0)));
}
/**
 * Parses {@code address} into an {@link InetSocketAddress}, falling back to
 * {@code defaultPort} when the string carries no explicit port. Delegates to the
 * three-argument overload with a {@code false} flag — NOTE(review): the flag
 * presumably controls name resolution; confirm against that overload.
 */
public static InetSocketAddress parseAddress(String address, int defaultPort) {
    return parseAddress(address, defaultPort, false);
}
// Verifies that a bracketed IPv6 literal (with "::" compression and mixed case)
// parses to a resolved address whose host string is the expanded, lowercased form.
@Test
void shouldParseAddressForIPv6() {
    InetSocketAddress socketAddress = AddressUtils.parseAddress("[1abc:2abc:3abc::5ABC:6abc]", 80);
    assertThat(socketAddress.isUnresolved()).isFalse();
    assertThat(socketAddress.getAddress().getHostAddress()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc");
    assertThat(socketAddress.getPort()).isEqualTo(80);
    assertThat(socketAddress.getHostString()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc");
}
/**
 * Builds the AST initializer expression for a KiePMML target-value variable by
 * cloning the template method, extracting the TARGETVALUE variable's object-creation
 * initializer, and substituting the four constructor arguments (value, display value,
 * prior probability, default value) from the given field.
 *
 * @param targetValueField source of the constructor-argument values
 * @return the populated {@code new ...} expression
 * @throws KiePMMLException if the template body, variable, or initializer is missing
 */
static ObjectCreationExpr getTargetValueVariableInitializer(final TargetValue targetValueField) {
    // clone() so the shared template is never mutated.
    final MethodDeclaration methodDeclaration =
        TARGETVALUE_TEMPLATE.getMethodsByName(GETTARGETVALUE).get(0).clone();
    final BlockStmt targetValueBody =
        methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
    final VariableDeclarator variableDeclarator =
        getVariableDeclarator(targetValueBody, TARGETVALUE).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, TARGETVALUE, targetValueBody)));
    final ObjectCreationExpr toReturn = variableDeclarator.getInitializer()
        .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, TARGETVALUE, targetValueBody)))
        .asObjectCreationExpr();
    // Argument positions must match the template constructor's parameter order.
    toReturn.setArgument(0, getExpressionForObject(targetValueField.getValue()));
    toReturn.setArgument(1, getExpressionForObject(targetValueField.getDisplayValue()));
    toReturn.setArgument(2, getExpressionForObject(targetValueField.getPriorProbability()));
    toReturn.setArgument(3, getExpressionForObject(targetValueField.getDefaultValue()));
    return toReturn;
}
// Verifies the generated initializer AST matches the expected expression template
// (filled with the random target value's fields) and that it compiles with the
// required imports.
@Test
void getTargetValueVariableInitializer() throws IOException {
    TargetValue targetValue = convertToKieTargetValue(getRandomTargetValue());
    ObjectCreationExpr retrieved = TargetValueFactory.getTargetValueVariableInitializer(targetValue);
    String text = getFileContent(TEST_01_SOURCE);
    Expression expected = JavaParserUtils.parseExpression(String.format(text, targetValue.getValue(),
        targetValue.getDisplayValue(), targetValue.getPriorProbability(), targetValue.getDefaultValue()));
    // AST-level comparison, not string comparison, to ignore formatting differences.
    assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
    List<Class<?>> imports = Arrays.asList(Arrays.class, Collections.class, KiePMMLTargetValue.class, TargetValue.class);
    commonValidateCompilationWithImports(retrieved, imports);
}
/**
 * Decodes a 2-byte little-endian IEEE 754 half-precision (binary16) value into a
 * {@code float}. Handles all fp16 classes: signed zeros, subnormals (renormalized
 * via the magic-constant trick), normals, infinities, and NaNs (signaling NaNs are
 * quieted). The masks/shift constants are declared elsewhere in this class.
 *
 * @param b binary holding exactly the two fp16 bytes, least-significant first
 * @return the nearest (exact, since fp32 is a superset of fp16) float value
 */
static float toFloat(Binary b) {
    short h = b.get2BytesLittleEndian();
    int bits = h & 0xffff; // widen without sign extension
    int s = bits & SIGN_MASK; // fp16 sign bit, still at bit 15
    int e = (bits >>> EXPONENT_SHIFT) & SHIFTED_EXPONENT_MASK; // 5-bit biased exponent
    int m = (bits) & SIGNIFICAND_MASK; // 10-bit significand
    int outE = 0;
    int outM = 0;
    if (e == 0) { // Denormal or 0
        if (m != 0) {
            // Convert denorm fp16 into normalized fp32:
            // add the significand to a magic float and subtract the bias constant,
            // letting fp32 arithmetic perform the renormalization.
            float o = Float.intBitsToFloat(FP32_DENORMAL_MAGIC + m);
            o -= FP32_DENORMAL_FLOAT;
            return s == 0 ? o : -o;
        }
        // m == 0 falls through with outE/outM zero, yielding +/-0.0f below.
    } else {
        outM = m << 13; // 10-bit significand -> top of fp32's 23-bit significand
        if (e == 0x1f) { // Infinite or NaN
            outE = 0xff;
            if (outM != 0) { // SNaNs are quieted
                outM |= FP32_QNAN_MASK;
            }
        } else {
            // Rebias the exponent from fp16 to fp32.
            outE = e - EXPONENT_BIAS + FP32_EXPONENT_BIAS;
        }
    }
    // Move the sign from bit 15 to bit 31 and assemble the fp32 bit pattern.
    int out = (s << 16) | (outE << FP32_EXPONENT_SHIFT) | outM;
    return Float.intBitsToFloat(out);
}
@Test public void testFloat16ToFloat() { // Zeroes assertEquals(0.0f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {0x00, 0x00})), 0.0f); assertEquals(-0.0f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x80})), 0.0f); // NaN assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xc0, (byte) 0x7f})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x7e})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x7f})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0xfe})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0xff})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x7f, (byte) 0x7e})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x7f, (byte) 0xfe})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0xfe})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x7f})), 0.0f); assertEquals( Float.NaN, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0xff})), 0.0f); // infinities assertEquals( Float.POSITIVE_INFINITY, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x7c})), 0.0f); assertEquals( Float.NEGATIVE_INFINITY, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0xfc})), 0.0f); // subnormals assertEquals( 5.9604645E-8f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x00})), 0.0f); assertEquals( -65504.0f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0xfb})), 0.0f); assertEquals( 
+65504.0f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x7b})), 0.0f); assertEquals( -6.097555E-5f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x83})), 0.0f); assertEquals( -5.9604645E-8f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x80})), 0.0f); // Known values assertEquals( 1.0009765625f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x3c})), 0.0f); assertEquals(-2.0f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0xc0})), 0.0f); assertEquals( 6.1035156e-5f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x04})), 0.0f); // Inexact assertEquals( 65504.0f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x7b})), 0.0f); assertEquals( 0.33325195f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x55, (byte) 0x35})), 0.0f); // Inexact // Denormals (flushed to +/-0) assertEquals( 6.097555e-5f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x03})), 0.0f); assertEquals( 5.9604645e-8f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x00})), 0.0f); // Inexact assertEquals( -6.097555e-5f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x83})), 0.0f); assertEquals( -5.9604645e-8f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x80})), 0.0f); // Inexact // Miscellaneous values. 
In general, they're chosen to test the sign/exponent and // exponent/mantissa boundaries assertEquals( +0.00050163269043f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x10})), 0.0f); assertEquals( -0.00050163269043f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x90})), 0.0f); assertEquals( +0.000502109527588f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1d, (byte) 0x10})), 0.0f); assertEquals( -0.000502109527588f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1d, (byte) 0x90})), 0.0f); assertEquals( +0.00074577331543f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x12})), 0.0f); assertEquals( -0.00074577331543f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x92})), 0.0f); assertEquals( +0.00100326538086f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x14})), 0.0f); assertEquals( -0.00100326538086f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x94})), 0.0f); assertEquals( +32.875f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x50})), 0.0f); assertEquals( -32.875f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0xd0})), 0.0f); // A few subnormals for good measure assertEquals( +1.66893005371e-06f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x00})), 0.0f); assertEquals( -1.66893005371e-06f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x80})), 0.0f); assertEquals( +3.21865081787e-05f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x02})), 0.0f); assertEquals( -3.21865081787e-05f, Float16.toFloat(Binary.fromConstantByteArray(new byte[] {(byte) 0x1c, (byte) 0x82})), 0.0f); }
/**
 * Parses a comma-separated list of share-group state names into a set of states.
 * ShareGroupState.parse maps unrecognized names to UNKNOWN rather than throwing,
 * so UNKNOWN in the result signals at least one invalid token.
 *
 * @param input comma-separated state names (whitespace around tokens is ignored)
 * @return the parsed states
 * @throws IllegalArgumentException if any token is not a valid state name
 */
static Set<ShareGroupState> shareGroupStatesFromString(String input) {
    Set<ShareGroupState> parsedStates = new HashSet<>();
    for (String token : input.split(",")) {
        parsedStates.add(ShareGroupState.parse(token.trim()));
    }
    if (parsedStates.contains(ShareGroupState.UNKNOWN)) {
        String validStates = Arrays.stream(ShareGroupState.values())
            .filter(state -> state != ShareGroupState.UNKNOWN)
            .map(Object::toString)
            .collect(Collectors.joining(", "));
        throw new IllegalArgumentException("Invalid state list '" + input + "'. Valid states are: " + validStates);
    }
    return parsedStates;
}
// Verifies case-insensitive parsing of single state names and that any list
// containing an invalid (or empty) token is rejected wholesale.
@Test
public void testShareGroupStatesFromString() {
    Set<ShareGroupState> result = ShareGroupCommand.shareGroupStatesFromString("Stable");
    assertEquals(Collections.singleton(ShareGroupState.STABLE), result);
    result = ShareGroupCommand.shareGroupStatesFromString("stable");
    assertEquals(new HashSet<>(Collections.singletonList(ShareGroupState.STABLE)), result);
    result = ShareGroupCommand.shareGroupStatesFromString("dead");
    assertEquals(new HashSet<>(Collections.singletonList(ShareGroupState.DEAD)), result);
    result = ShareGroupCommand.shareGroupStatesFromString("empty");
    assertEquals(new HashSet<>(Collections.singletonList(ShareGroupState.EMPTY)), result);
    // Lists with any invalid token must be rejected even if other tokens are valid.
    assertThrows(IllegalArgumentException.class, () -> ShareGroupCommand.shareGroupStatesFromString("bad, wrong"));
    assertThrows(IllegalArgumentException.class, () -> ShareGroupCommand.shareGroupStatesFromString(" bad, Stable"));
    assertThrows(IllegalArgumentException.class, () -> ShareGroupCommand.shareGroupStatesFromString(" , ,"));
}
/**
 * Resolves the raw {@link Class} behind a {@code TypeRef}. If the wrapped type is
 * already a plain class it is returned directly; otherwise resolution is delegated
 * to the {@code getRawType(Type)} overload (presumably handling parameterized,
 * array, wildcard, and variable types — confirm against that overload).
 *
 * @param typeRef the type reference to resolve
 * @return the raw class of the referenced type
 */
public static Class<?> getRawType(TypeRef<?> typeRef) {
    final Type type = typeRef.getType();
    // Class is final, so instanceof is equivalent to getClass() == Class.class here.
    return type instanceof Class ? (Class<?>) type : getRawType(type);
}
// Verifies raw-type resolution for generic fields: parameterized lists, raw types,
// wildcard-parameterized maps, generic arrays, and bounded/unbounded wildcards.
// NOTE(review): assertEquals(actual, expected) argument order is reversed from the
// JUnit convention; behavior is unchanged but failure messages will be misleading.
@Test
public void testGetRawType() throws NoSuchFieldException {
    assertEquals(
        TypeUtils.getRawType(Test3.class.getDeclaredField("fromField3").getGenericType()),
        ArrayList.class);
    assertEquals(
        TypeUtils.getRawType(Test3.class.getDeclaredField("raw").getGenericType()), List.class);
    assertEquals(
        TypeUtils.getRawType(Test3.class.getDeclaredField("unknown2").getGenericType()),
        Map.class);
    assertEquals(
        TypeUtils.getRawType(Test3.class.getDeclaredField("arrayUnknown2").getGenericType()),
        Map[].class);
    assertEquals(
        TypeUtils.getRawType(Test3.class.getDeclaredField("unboundWildcard").getGenericType()),
        ArrayList.class);
    assertEquals(
        TypeUtils.getRawType(Test3.class.getDeclaredField("upperBound").getGenericType()),
        ArrayList.class);
}
/**
 * Sets the socket (SO_TIMEOUT) read timeout, in milliseconds, in the keyed
 * parameter map under the "soTimeoutMs" key.
 *
 * @param timeout timeout in milliseconds
 */
public void setSoTimeoutMs(int timeout) {
    kp.put("soTimeoutMs",timeout);
}
// Connecting to a non-routable address (10.255.255.1) with a 300 ms timeout must
// fail with a connect-timeout recorded as a non-fatal InterruptedIOException and
// a S_CONNECT_FAILED status, without ever completing a fetch.
// NOTE(review): the elapsed-time window (300-400 ms of wall-clock) makes this
// test timing-sensitive and potentially flaky on loaded machines.
@Test
public void testConnectionTimeout() throws Exception {
    CrawlURI curi = makeCrawlURI("http://10.255.255.1/");
    fetcher().setSoTimeoutMs(300);
    long start = System.currentTimeMillis();
    fetcher().process(curi);
    long elapsed = System.currentTimeMillis() - start;
    assertTrue(elapsed >= 300 && elapsed < 400);
    // Httpcomponents throws org.apache.http.conn.ConnectTimeoutException,
    // commons-httpclient throws java.net.SocketTimeoutException. Both are
    // instances of InterruptedIOException
    assertEquals(1, curi.getNonFatalFailures().size());
    assertTrue(curi.getNonFatalFailures().toArray()[0] instanceof InterruptedIOException);
    assertTrue(curi.getNonFatalFailures().toArray()[0].toString().matches("(?i).*connect.*timed out.*"));
    assertEquals(FetchStatusCodes.S_CONNECT_FAILED, curi.getFetchStatus());
    // Fetch never completed, so the completion timestamp stays at its default 0.
    assertEquals(0, curi.getFetchCompletedTime());
}
/**
 * Returns the node id advertised by the first log directory (in iteration order)
 * whose metadata carries one, or an empty {@link OptionalInt} if none does.
 */
public OptionalInt nodeId() {
    return logDirProps.values().stream()
            .map(MetaProperties::nodeId)
            .filter(OptionalInt::isPresent)
            .findFirst()
            .orElseGet(OptionalInt::empty);
}
// Verifies the FOO fixture's log-directory metadata reports node id 2.
@Test
public void testNodeIdForFoo() {
    assertEquals(OptionalInt.of(2), FOO.nodeId());
}
/**
 * Hashes the same fields used by equality (url, waitTime, watchDelay) — keep this
 * in sync with {@code equals} so the equals/hashCode contract holds.
 */
@Override
public int hashCode() {
    return Objects.hash(url, waitTime, watchDelay);
}
// Verifies hashCode() is derived from exactly url, waitTime, and watchDelay.
@Test
public void testHashCode() {
    assertEquals(Objects.hash(consulConfig.getUrl(), consulConfig.getWaitTime(), consulConfig.getWatchDelay()),
        consulConfig.hashCode());
}
/**
 * Checks whether the caller may access the given timeline entity under the
 * requested access type. Access is granted when ACLs are disabled, or when the
 * caller is an admin, the domain owner, or listed in the domain's ACL for the
 * access type (falling back to the default YARN app ACL when none is configured).
 *
 * @param callerUGI caller identity; may be null (anonymous), which is denied
 *                  unless ACLs are disabled
 * @param applicationAccessType the kind of access being requested
 * @param entity the timeline entity whose domain governs access
 * @return true if access is allowed
 * @throws YarnException if the entity's domain cannot be found
 * @throws IOException on errors loading the domain from the timeline store
 */
public boolean checkAccess(UserGroupInformation callerUGI,
    ApplicationAccessType applicationAccessType,
    TimelineEntity entity) throws YarnException, IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Verifying the access of "
            + (callerUGI == null ? null : callerUGI.getShortUserName())
            + " on the timeline entity "
            + new EntityIdentifier(entity.getEntityId(), entity.getEntityType()));
    }
    // With ACLs disabled everyone is allowed.
    if (!adminAclsManager.areACLsEnabled()) {
        return true;
    }
    // find domain owner and acls — check the in-memory cache first, then the store.
    AccessControlListExt aclExt = aclExts.get(entity.getDomainId());
    if (aclExt == null) {
        aclExt = loadDomainFromTimelineStore(entity.getDomainId());
    }
    if (aclExt == null) {
        throw new YarnException("Domain information of the timeline entity "
            + new EntityIdentifier(entity.getEntityId(), entity.getEntityType())
            + " doesn't exist.");
    }
    String owner = aclExt.owner;
    AccessControlList domainACL = aclExt.acls.get(applicationAccessType);
    if (domainACL == null) {
        // No ACL configured for this access type: fall back to the default app ACL.
        LOG.debug("ACL not found for access-type {} for domain {} owned by {}."
            + " Using default [{}]", applicationAccessType, entity.getDomainId(), owner,
            YarnConfiguration.DEFAULT_YARN_APP_ACL);
        domainACL = new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
    }
    // Admins, the domain owner, and users named in the ACL are allowed.
    if (callerUGI != null
        && (adminAclsManager.isAdmin(callerUGI)
            || callerUGI.getShortUserName().equals(owner)
            || domainACL.isUserAllowed(callerUGI))) {
        return true;
    }
    return false;
}
// A TimelineDomain with no owner set is treated as corrupted: access checking
// must throw a YarnException whose message mentions "is corrupted.".
@Test
void testCorruptedOwnerInfoForDomain() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "owner");
    TimelineACLsManager timelineACLsManager = new TimelineACLsManager(conf);
    // Deliberately left without an owner to simulate corrupted domain info.
    TimelineDomain domain = new TimelineDomain();
    try {
        timelineACLsManager.checkAccess(
            UserGroupInformation.createRemoteUser("owner"), domain);
        fail("Exception is expected");
    } catch (YarnException e) {
        assertTrue(e.getMessage()
            .contains("is corrupted."), "It's not the exact expected exception");
    }
}
/**
 * Terminates the driver: stops accepting work, waits for all tracked container
 * resources to be released, then stops the resource-manager and node-manager
 * clients. Both clients are always attempted; if both fail, the first exception
 * is thrown with the second attached as suppressed.
 *
 * @throws Exception the first failure encountered while stopping the clients
 */
@Override
public void terminate() throws Exception {
    isRunning = false;
    // wait for all containers to stop — register as a party in the phaser and
    // block until every outstanding release has arrived.
    trackerOfReleasedResources.register();
    trackerOfReleasedResources.arriveAndAwaitAdvance();
    // shut down all components
    Exception exception = null;
    if (resourceManagerClient != null) {
        try {
            resourceManagerClient.stop();
        } catch (Exception e) {
            exception = e;
        }
    }
    if (nodeManagerClient != null) {
        try {
            nodeManagerClient.stop();
        } catch (Exception e) {
            // Keep the first failure as primary, attach later ones as suppressed.
            exception = ExceptionUtils.firstOrSuppressed(e, exception);
        }
    }
    if (exception != null) {
        throw exception;
    }
}
// Verifies that terminate() completes when invoked from the driver's main thread,
// i.e. termination does not deadlock waiting on resources released by that thread.
@Test
void testTerminationDoesNotBlock() throws Exception {
    new Context() {
        {
            runTest(
                () -> {
                    try {
                        runInMainThread(() -> getDriver().terminate());
                    } catch (Exception ex) {
                        log.error("cannot terminate driver", ex);
                        fail("termination of driver failed");
                    }
                });
        }
    };
}
/**
 * Converts playlist items into Apple Music protocol tracks, attaching each item's
 * playlist id and position to the track built from its recording.
 *
 * @param musicPlaylistItems items to convert; each must be non-null with a
 *                           non-blank playlist id
 * @return the converted tracks, in iteration order
 * @throws IllegalStateException if an item is null or its playlist id is blank
 */
public static List<MusicProtocol.MusicTrack> convertToAppleMusicPlaylistTracks(Collection<MusicPlaylistItem> musicPlaylistItems) {
    List<MusicProtocol.MusicTrack> convertedTracks = new ArrayList<>();
    for (MusicPlaylistItem item : musicPlaylistItems) {
        if (item == null) {
            throw new IllegalStateException("MusicPlaylistItem cannot be null");
        }
        if (StringUtils.isBlank(item.getPlaylistId())) {
            // Fixed typo in the original message ("MusicPlaylistIte").
            throw new IllegalStateException("MusicPlaylistItem::getPlaylistId cannot be blank");
        }
        MusicProtocol.MusicTrack.Builder trackBuilder = convertMusicRecordingToTrackBuilder(item.getTrack());
        trackBuilder.setPlaylistId(item.getPlaylistId())
            .setPlaylistPosition(item.getOrder());
        convertedTracks.add(trackBuilder.build());
    }
    return convertedTracks;
}
// Covers three conversion scenarios: a null item (throws), a single valid item
// (playlist id/position and recording fields carried through), and a batch
// containing an item with a blank playlist id (throws).
@Test
public void testConvertToAppleMusicPlaylistTracks() {
    String expectedISRC = "1234567890ab";
    String expectedTitle = "Expected Track Title";
    long expectedDurationMillis = RandomUtils.nextLong();
    String expectedArtistName = "Expected Artist Name";
    MusicGroup musicGroup = new MusicGroup(expectedArtistName);
    String expectedICPN = "1234567890abcdefg";
    String expectedAlbumTitle = "Expected Album Title";
    MusicRelease musicRelease = new MusicRelease(expectedICPN, expectedAlbumTitle, List.of(musicGroup));
    boolean expectedIsExplicit = true;
    MusicRecording musicRecording = new MusicRecording(expectedISRC, expectedTitle, expectedDurationMillis,
        musicRelease, List.of(musicGroup), expectedIsExplicit);
    String expectedPlaylistId = "ExpectedPlaylist.ID";
    int expectedOrder = 1;
    String invalidPlaylistId = " ";
    MusicPlaylistItem musicPlaylistItem = new MusicPlaylistItem(musicRecording, expectedPlaylistId, expectedOrder);
    List<MusicPlaylistItem> playlistItems = new ArrayList<>();
    playlistItems.add(musicPlaylistItem);
    // Null playlist
    List<MusicPlaylistItem> nullPlaylistItems = new ArrayList<>();
    nullPlaylistItems.add(null);
    Assertions.assertThrows(IllegalStateException.class,
        () -> AppleMusicPlaylistConverter.convertToAppleMusicPlaylistTracks(nullPlaylistItems));
    // One playlist item
    List<MusicProtocol.MusicTrack> musicTracks =
        AppleMusicPlaylistConverter.convertToAppleMusicPlaylistTracks(playlistItems);
    Assertions.assertNotNull(musicTracks);
    Assertions.assertFalse(musicTracks.isEmpty());
    Assertions.assertEquals(musicTracks.size(), 1);
    MusicProtocol.MusicTrack validMusicTrack = musicTracks.get(0);
    Assertions.assertNotNull(validMusicTrack);
    Assertions.assertTrue(validMusicTrack.hasPlaylistId());
    Assertions.assertEquals(validMusicTrack.getPlaylistId(), expectedPlaylistId);
    Assertions.assertTrue(validMusicTrack.hasPlaylistPosition());
    Assertions.assertEquals(validMusicTrack.getPlaylistPosition(), expectedOrder);
    // Tested in testConvertMusicRecordingToTrackBuilder
    Assertions.assertTrue(validMusicTrack.hasMusicAlbum());
    Assertions.assertTrue(validMusicTrack.hasIsrcCode());
    Assertions.assertTrue(validMusicTrack.hasTitle());
    Assertions.assertTrue(validMusicTrack.hasDurationMillis());
    Assertions.assertTrue(validMusicTrack.hasIsExplicit());
    Assertions.assertEquals(validMusicTrack.getIsExplicit(), expectedIsExplicit);
    // One playlist item with invalid playlist id
    MusicPlaylistItem invalidPlaylistIdItem = new MusicPlaylistItem(musicRecording, invalidPlaylistId, expectedOrder);
    Assertions.assertThrows(IllegalStateException.class,
        () -> AppleMusicPlaylistConverter.convertToAppleMusicPlaylistTracks(List.of(musicPlaylistItem, invalidPlaylistIdItem)));
}
/**
 * Returns {@code true} when the wrapped internal exception is a
 * {@code TimeoutException} (also {@code false} when no exception is wrapped,
 * since {@code instanceof} on null yields false).
 */
public boolean isTimeoutException() {
    return internalException instanceof TimeoutException;
}
// Verifies isTimeoutException() is false for every wrapped non-timeout cause.
@Test
public void testIsNotTimeoutException() {
    assertFalse(TOPIC_EXISTS.isTimeoutException());
    assertFalse(REJECTED_EXECUTION.isTimeoutException());
    assertFalse(INTERRUPTED.isTimeoutException());
    assertFalse(NULL_POINTER.isTimeoutException());
    assertFalse(NOT_LEADER.isTimeoutException());
}
/**
 * Parses the given string into a Joda {@link DateTime} using the configured
 * pattern and locale. The configured default year fills in patterns without a
 * year component; the configured zone is applied only when the pattern itself
 * carries no time-zone token (otherwise the zone parsed from the input wins).
 *
 * @param value the raw date string; null/empty yields null
 * @return the parsed {@link DateTime}, or null for empty input
 */
@Override
@Nullable
public Object convert(@Nullable String value) {
    if (isNullOrEmpty(value)) {
        return null;
    }
    LOG.debug("Trying to parse date <{}> with pattern <{}>, locale <{}>, and timezone <{}>.", value, dateFormat, locale, timeZone);
    DateTimeFormatter formatter = DateTimeFormat
            .forPattern(dateFormat)
            .withDefaultYear(YearMonth.now(timeZone).getYear())
            .withLocale(locale);
    if (!containsTimeZone) {
        // Pattern has no zone token — impose the configured zone on the result.
        formatter = formatter.withZone(timeZone);
    }
    return DateTime.parse(value, formatter);
}
// With an unrecognized locale ("Wurstweck") the converter must still parse the
// English month abbreviation "May" — i.e. it falls back to an English locale.
@Test
public void convertUsesEnglishIfLocaleIsInvalid() throws Exception {
    final Converter c = new DateConverter(config("dd/MMM/YYYY HH:mm:ss Z", null, "Wurstweck"));
    final DateTime dateTime = (DateTime) c.convert("11/May/2017 15:10:48 +0200");
    // +0200 offset normalizes to 13:10:48 UTC.
    assertThat(dateTime).isEqualTo("2017-05-11T13:10:48.000Z");
}
@Override public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context, Map<String, Long> recentlyUnloadedBundles, Map<String, Long> recentlyUnloadedBrokers) { final var conf = context.brokerConfiguration(); decisionCache.clear(); stats.clear(); Map<String, BrokerLookupData> availableBrokers; try { availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync() .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } catch (ExecutionException | InterruptedException | TimeoutException e) { counter.update(Failure, Unknown); log.warn("Failed to fetch available brokers. Stop unloading.", e); return decisionCache; } try { final var loadStore = context.brokerLoadDataStore(); stats.setLoadDataStore(loadStore); boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log); var skipReason = stats.update( context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf); if (skipReason.isPresent()) { if (debugMode) { log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. 
Reason:{}.", skipReason.get()); } counter.update(Skip, skipReason.get()); return decisionCache; } counter.updateLoadData(stats.avg, stats.std); if (debugMode) { log.info("brokers' load stats:{}", stats); } // skip metrics int numOfBrokersWithEmptyLoadData = 0; int numOfBrokersWithFewBundles = 0; final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd(); boolean transfer = conf.isLoadBalancerTransferEnabled(); if (stats.std() > targetStd || isUnderLoaded(context, stats.peekMinBroker(), stats) || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { unloadConditionHitCount++; } else { unloadConditionHitCount = 0; } if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Shedding condition hit count:{} is less than or equal to the threshold:{}.", unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold()); } counter.update(Skip, HitCount); return decisionCache; } while (true) { if (!stats.hasTransferableBrokers()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers."); } break; } UnloadDecision.Reason reason; if (stats.std() > targetStd) { reason = Overloaded; } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) { reason = Underloaded; if (debugMode) { log.info(String.format("broker:%s is underloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this underloaded broker.", stats.peekMinBroker(), context.brokerLoadDataStore().get(stats.peekMinBroker()).get(), stats.std(), targetStd)); } } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { reason = Overloaded; if (debugMode) { log.info(String.format("broker:%s is overloaded:%s although " + "load std:%.2f <= targetStd:%.2f. 
" + "Continuing unload for this overloaded broker.", stats.peekMaxBroker(), context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(), stats.std(), targetStd)); } } else { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + "The overall cluster load meets the target, std:{} <= targetStd:{}." + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.", stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker()); } break; } String maxBroker = stats.pollMaxBroker(); String minBroker = stats.peekMinBroker(); Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker); Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker); if (maxBrokerLoadData.isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } if (minBrokerLoadData.isEmpty()) { log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker); numOfBrokersWithEmptyLoadData++; continue; } double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA(); double minLoad = minBrokerLoadData.get().getWeightedMaxEMA(); double offload = (maxLoad - minLoad) / 2; BrokerLoadData brokerLoadData = maxBrokerLoadData.get(); double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn() + brokerLoadData.getMsgThroughputOut(); double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn() + minBrokerLoadData.get().getMsgThroughputOut(); double offloadThroughput = maxBrokerThroughput * offload / maxLoad; if (debugMode) { log.info(String.format( "Attempting to shed load from broker:%s%s, which has the max resource " + "usage:%.2f%%, targetStd:%.2f," + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.", maxBroker, transfer ? 
" to broker:" + minBroker : "", maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB )); } double trafficMarkedToOffload = 0; double trafficMarkedToGain = 0; Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker); if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData(); if (maxBrokerTopBundlesLoadData.size() == 1) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Sole namespace bundle:%s is overloading the broker. ", maxBroker, maxBrokerTopBundlesLoadData.iterator().next())); continue; } Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker); var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent() ? 
minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null; if (maxBrokerTopBundlesLoadData.isEmpty()) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Broker overloaded despite having no bundles", maxBroker)); continue; } int remainingTopBundles = maxBrokerTopBundlesLoadData.size(); for (var e : maxBrokerTopBundlesLoadData) { String bundle = e.bundleName(); if (channel != null && !channel.isOwner(bundle, maxBroker)) { if (debugMode) { log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " MaxBroker:%s is not the owner.", bundle, maxBroker)); } continue; } if (recentlyUnloadedBundles.containsKey(bundle)) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " Bundle has been recently unloaded at ts:%d.", bundle, recentlyUnloadedBundles.get(bundle))); } continue; } if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet " + "affinity(isolation) or anti-affinity group policies.", bundle)); } continue; } if (remainingTopBundles <= 1) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is" + " less than or equal to 1.", bundle, maxBroker)); } break; } var bundleData = e.stats(); double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut; boolean swap = false; List<Unload> minToMaxUnloads = new ArrayList<>(); double minBrokerBundleSwapThroughput = 0.0; if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) { // see if we can swap bundles from min to max broker to balance better. 
if (transfer && minBrokerTopBundlesLoadDataIter != null) { var maxBrokerNewThroughput = maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain - maxBrokerBundleThroughput; var minBrokerNewThroughput = minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput; while (minBrokerTopBundlesLoadDataIter.hasNext()) { var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next(); if (!isTransferable(context, availableBrokers, minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) { continue; } var minBrokerBundleThroughput = minBrokerBundleData.stats().msgThroughputIn + minBrokerBundleData.stats().msgThroughputOut; var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput; var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput; if (maxBrokerNewThroughputTmp < maxBrokerThroughput && minBrokerNewThroughputTmp < maxBrokerThroughput) { minToMaxUnloads.add(new Unload(minBroker, minBrokerBundleData.bundleName(), Optional.of(maxBroker))); maxBrokerNewThroughput = maxBrokerNewThroughputTmp; minBrokerNewThroughput = minBrokerNewThroughputTmp; minBrokerBundleSwapThroughput += minBrokerBundleThroughput; if (minBrokerNewThroughput <= maxBrokerNewThroughput && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) { swap = true; break; } } } } if (!swap) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is " + "greater than the target :%.2f KByte/s.", bundle, (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB, offloadThroughput / KB)); } break; } } Unload unload; if (transfer) { if (swap) { minToMaxUnloads.forEach(minToMaxUnload -> { if (debugMode) { log.info("Decided to gain bundle:{} from min broker:{}", minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker()); } var decision = 
new UnloadDecision(); decision.setUnload(minToMaxUnload); decision.succeed(reason); decisionCache.add(decision); }); if (debugMode) { log.info(String.format( "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.", minBrokerBundleSwapThroughput / KB, minBroker, maxBroker)); trafficMarkedToGain += minBrokerBundleSwapThroughput; } } unload = new Unload(maxBroker, bundle, Optional.of(minBroker)); } else { unload = new Unload(maxBroker, bundle); } var decision = new UnloadDecision(); decision.setUnload(unload); decision.succeed(reason); decisionCache.add(decision); trafficMarkedToOffload += maxBrokerBundleThroughput; remainingTopBundles--; if (debugMode) { log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s." + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s." + " Target:%.2f KByte/s.", bundle, maxBrokerBundleThroughput / KB, trafficMarkedToOffload / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB)); } } if (trafficMarkedToOffload > 0) { var adjustedOffload = (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput; stats.offload(maxLoad, minLoad, adjustedOffload); if (debugMode) { log.info( String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}", stats, maxLoad, minLoad, adjustedOffload)); } } else { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " There is no bundle that can be unloaded in top bundles load data. 
" + "Consider splitting bundles owned by the broker " + "to make each bundle serve less traffic " + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport" + " to report more bundles in the top bundles load data.", maxBroker)); } } // while end if (debugMode) { log.info("decisionCache:{}", decisionCache); } if (decisionCache.isEmpty()) { UnloadDecision.Reason reason; if (numOfBrokersWithEmptyLoadData > 0) { reason = NoLoadData; } else if (numOfBrokersWithFewBundles > 0) { reason = NoBundles; } else { reason = HitCount; } counter.update(Skip, reason); } else { unloadConditionHitCount = 0; } } catch (Throwable e) { log.error("Failed to process unloading. ", e); this.counter.update(Failure, Unknown); } return decisionCache; }
@Test(timeOut = 30 * 1000)
public void testBundlesWithIsolationPolicies() {
    // Wire a TransferShedder with a broker-isolation-policies filter so that
    // isolation constraints participate in unload decisions.
    List<BrokerFilter> filters = new ArrayList<>();
    var allocationPoliciesSpy = mock(SimpleResourceAllocationPolicies.class);
    IsolationPoliciesHelper isolationPoliciesHelper = new IsolationPoliciesHelper(allocationPoliciesSpy);
    BrokerIsolationPoliciesFilter filter = new BrokerIsolationPoliciesFilter(isolationPoliciesHelper);
    filters.add(filter);
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = spy(new TransferShedder(pulsar, counter, filters,
            isolationPoliciesHelper, antiAffinityGroupPolicyHelper));
    // Pin namespace E to broker5 via an isolation policy.
    setIsolationPolicies(allocationPoliciesSpy, "my-tenant/my-namespaceE",
            Set.of("broker5:8080"), Set.of(), Set.of(), 1);
    var ctx = setupContext();
    ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(true);
    doReturn(ctx.brokerConfiguration()).when(pulsar).getConfiguration();

    // Case 1: transfer mode enabled — expect bundle D1 transferred broker4 -> broker1.
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    var expected = new HashSet<UnloadDecision>();
    expected.add(new UnloadDecision(
            new Unload("broker4:8080", bundleD1, Optional.of("broker1:8080")),
            Success, Overloaded));
    assertEquals(res, expected);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);

    // Case 2: transfer disabled — the same bundle is unloaded with no destination broker.
    ctx.brokerConfiguration().setLoadBalancerTransferEnabled(false);
    res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    expected = new HashSet<>();
    expected.add(new UnloadDecision(
            new Unload("broker4:8080", bundleD1, Optional.empty()),
            Success, Overloaded));
    assertEquals(res, expected);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);

    // Case 3: shedding of bundles with policies disabled — every bundle now appears
    // policy-bound, so no decision is produced and a Skip/NoBundles is counted.
    doReturn(CompletableFuture.completedFuture(true))
            .when(allocationPoliciesSpy).areIsolationPoliciesPresentAsync(any());
    ctx.brokerConfiguration().setLoadBalancerTransferEnabled(true);
    ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(false);
    res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    assertTrue(res.isEmpty());
    assertEquals(counter.getBreakdownCounters().get(Skip).get(NoBundles).get(), 1);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);

    // Case 4: same as case 3 but with transfer disabled — still skipped.
    ctx.brokerConfiguration().setLoadBalancerTransferEnabled(false);
    res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    assertTrue(res.isEmpty());
    assertEquals(counter.getBreakdownCounters().get(Skip).get(NoBundles).get(), 2);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
@Override
public Iterable<RedisClusterNode> clusterGetNodes() {
    // Issue the CLUSTER NODES command (null key — not routed by slot) and
    // return the decoded node descriptions.
    final Iterable<RedisClusterNode> nodes = read(null, StringCodec.INSTANCE, CLUSTER_NODES);
    return nodes;
}
@Test
public void testClusterGetNodes() {
    // The test cluster exposes six nodes in total.
    Iterable<RedisClusterNode> clusterNodes = connection.clusterGetNodes();
    assertThat(clusterNodes).hasSize(6);

    for (RedisClusterNode node : clusterNodes) {
        // Every node must report its basic identity and state.
        assertThat(node.getLinkState()).isNotNull();
        assertThat(node.getFlags()).isNotEmpty();
        assertThat(node.getHost()).isNotNull();
        assertThat(node.getPort()).isNotNull();
        assertThat(node.getId()).isNotNull();
        assertThat(node.getType()).isNotNull();
        if (node.getType() == NodeType.MASTER) {
            // Masters own hash slots.
            assertThat(node.getSlotRange().getSlots()).isNotEmpty();
        } else {
            // Replicas reference their master.
            assertThat(node.getMasterId()).isNotNull();
        }
    }
}
@Bean
@ConfigurationProperties(prefix = "shenyu")
public ShenyuClientConfig shenyuClientConfig() {
    // Spring binds all "shenyu.*" properties onto this freshly created instance.
    final ShenyuClientConfig clientConfig = new ShenyuClientConfig();
    return clientConfig;
}
@Test
public void testShenyuClientConfig() {
    // Stub the registration login statically so the context can start
    // without a reachable shenyu-admin instance.
    // NOTE(review): consider try-with-resources for the static mock so it is
    // released even if the context runner throws.
    MockedStatic<RegisterUtils> registerUtilsMockedStatic = mockStatic(RegisterUtils.class);
    registerUtilsMockedStatic.when(() -> RegisterUtils.doLogin(any(), any(), any())).thenReturn(Optional.ofNullable("token"));
    applicationContextRunner.run(context -> {
        // The auto-configured bean must exist and carry a "dubbo" client entry.
        ShenyuClientConfig config = context.getBean("shenyuClientConfig", ShenyuClientConfig.class);
        assertNotNull(config);
        assertThat(config.getClient()).containsKey("dubbo");
    });
    registerUtilsMockedStatic.close();
}
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
        description = "The array to sort") final List<T> input) {
    // Ascending order is the default when no direction argument is supplied.
    final String defaultDirection = "ASC";
    return arraySortWithDirection(input, defaultDirection);
}
@Test
public void shouldSortInts() {
    // Given an unordered list including a negative value.
    final List<Integer> unsorted = Arrays.asList(1, 3, -2);

    // When sorted with the default direction.
    final List<Integer> sorted = udf.arraySortDefault(unsorted);

    // Then the result is ascending.
    assertThat(sorted, contains(-2, 1, 3));
}
public boolean isInstalled() {
    // A non-null trigger indicates a usable installation.
    final boolean installed = (mTrigger != null);
    return installed;
}
@Test
public void testImeInstalledWhenOnlyVoice() {
    // A voice-only input method still counts as an installed IME trigger.
    addInputMethodInfo(List.of("voice"));
    final boolean installed = ImeTrigger.isInstalled(mMockInputMethodService);
    Assert.assertTrue(installed);
}
/**
 * Builds a JAAS configuration entry of the form
 * {@code "<moduleName> required key1=\"value1\" key2=\"value2\";"}.
 *
 * @param moduleName the login module name; must be non-empty and must not contain {@code '='} or {@code ';'}
 * @param options key/value options; keys must not contain {@code '='} or {@code ';'}, and neither keys nor
 *                values may be {@code null}
 * @return the assembled JAAS configuration string
 * @throws IllegalArgumentException if the module name or any key is invalid
 * @throws NullPointerException if any key or value is {@code null}
 */
public static String jaasConfig(String moduleName, Map<String, String> options) {
    // Validate the module name once, up front, so an invalid (or empty) module
    // name is rejected even when no options are supplied. (Previously this check
    // lived inside the loop and was skipped entirely for an empty options map.)
    if (moduleName.isEmpty() || moduleName.contains(";") || moduleName.contains("=")) {
        throw new IllegalArgumentException("module name must be not empty and must not contain '=' or ';'");
    }
    StringJoiner joiner = new StringJoiner(" ");
    for (Entry<String, String> entry : options.entrySet()) {
        String key = Objects.requireNonNull(entry.getKey());
        String value = Objects.requireNonNull(entry.getValue());
        if (key.contains("=") || key.contains(";")) {
            throw new IllegalArgumentException("Keys must not contain '=' or ';'");
        }
        // NOTE(review): values containing '"' are not escaped and would corrupt the
        // generated config — confirm whether callers can ever pass such values.
        joiner.add(key + "=\"" + value + "\"");
    }
    return moduleName + " required " + joiner + ";";
}
@Test
public void testValidJaasConfig() {
    // NOTE(review): this relies on HashMap happening to iterate key1 before key2;
    // a LinkedHashMap would make the expected ordering explicit and robust.
    Map<String, String> options = new HashMap<>();
    options.put("key1", "value1");
    options.put("key2", "value2");
    String moduleName = "Module";
    String expected = "Module required key1=\"value1\" key2=\"value2\";";
    assertEquals(expected, AuthenticationUtils.jaasConfig(moduleName, options));
}
@Override
public String getPrefix() {
    // Prefix = package name of DAVSSLProtocol + "." + upper-cased protocol type name.
    final String packageName = DAVSSLProtocol.class.getPackage().getName();
    final String typeName = StringUtils.upperCase(this.getType().name());
    return String.format("%s.%s", packageName, typeName);
}
@Test
public void testPrefix() {
    // The prefix is the package name plus the upper-cased protocol type.
    final String prefix = new DAVSSLProtocol().getPrefix();
    assertEquals("ch.cyberduck.core.dav.DAV", prefix);
}
@Override
public boolean equals(Object obj) {
    // Identity short-circuit.
    if (this == obj) {
        return true;
    }
    // Non-instances (including null) are never equal.
    if (!(obj instanceof NiciraNshNp)) {
        return false;
    }
    final NiciraNshNp other = (NiciraNshNp) obj;
    return Objects.equals(nshNp, other.nshNp);
}
@Test
public void testEquals() {
    // Two instances built from the same NP value are equal; a different NP is not.
    final NiciraNshNp nshNp1 = new NiciraNshNp(np1);
    final NiciraNshNp sameAsNshNp1 = new NiciraNshNp(np1);
    final NiciraNshNp nshNp2 = new NiciraNshNp(np2);
    new EqualsTester()
            .addEqualityGroup(nshNp1, sameAsNshNp1)
            .addEqualityGroup(nshNp2)
            .testEquals();
}
public boolean isRunning() {
    // Process.exitValue() throws IllegalThreadStateException while the process
    // is still alive — that exception is our "running" signal.
    try {
        process.exitValue();
        return false;
    } catch (IllegalThreadStateException stillRunning) {
        return true;
    }
}
@Test
void shouldReturnTrueWhenAProcessIsRunning() {
    // exitValue() throws IllegalThreadStateException while a process is alive,
    // which is exactly how ProcessWrapper detects a running process.
    Process process = getMockedProcess(mock(OutputStream.class));
    when(process.exitValue()).thenThrow(new IllegalThreadStateException());
    ProcessWrapper processWrapper = new ProcessWrapper(process, null, "", inMemoryConsumer(), UTF_8, null);
    assertThat(processWrapper.isRunning()).isTrue();
}
public static AggregateFunctionInitArguments createAggregateFunctionInitArgs(
    final int numInitArgs,
    final FunctionCall functionCall
) {
  // Convenience overload: no UDAF column indices and an empty config.
  return createAggregateFunctionInitArgs(
      numInitArgs, Collections.emptyList(), functionCall, KsqlConfig.empty());
}
@Test
public void shouldCreateDummyArgs() {
  // Given: a call with two column references and one literal init argument.
  when(functionCall.getArguments()).thenReturn(ImmutableList.of(
      new UnqualifiedColumnReferenceExp(ColumnName.of("FOO")),
      new UnqualifiedColumnReferenceExp(ColumnName.of("Bob")),
      new StringLiteral("No issue here")
  ));

  // When:
  AggregateFunctionInitArguments initArgs = UdafUtil.createAggregateFunctionInitArgs(
      1,
      functionCall
  );

  // Then: no UDAF indices, exactly the one literal init arg, and an empty config
  // (this is the Collections.emptyList()/KsqlConfig.empty() overload).
  assertEquals(0, initArgs.udafIndices().size());
  assertEquals(1, initArgs.args().size());
  assertEquals("No issue here", initArgs.arg(0));
  assertTrue(initArgs.config().isEmpty());
}
@Override
public BuiltInScalarFunctionImplementation specialize(BoundVariables boundVariables, int arity, FunctionAndTypeManager functionAndTypeManager)
{
    // Bind every declared choice against the given type variables and collect the results.
    ImmutableList.Builder<ScalarFunctionImplementationChoice> boundChoices = ImmutableList.builder();
    choices.forEach(choice ->
            boundChoices.add(getScalarFunctionImplementationChoice(boundVariables, functionAndTypeManager, choice)));
    return new BuiltInScalarFunctionImplementation(boundChoices.build());
}
// Building a function whose single choice maps two implementations onto the
// same parameter types must fail at specialization time with a message naming
// both ambiguous methods.
@Test(expectedExceptions = {IllegalStateException.class}, expectedExceptionsMessageRegExp = "two matching methods \\(varcharToBigintReturnFirstExtraParameter and varcharToBigintReturnExtraParameter\\) for parameter types \\[varchar\\(10\\)\\]")
public void testFailIfTwoMethodsWithSameArguments()
{
    SqlScalarFunction function = SqlScalarFunction.builder(TestMethods.class)
            .signature(SIGNATURE)
            .deterministic(true)
            .calledOnNullInput(false)
            .choice(choice -> choice
                    .implementation(methodsGroup -> methodsGroup.methods("varcharToBigintReturnFirstExtraParameter"))
                    .implementation(methodsGroup -> methodsGroup.methods("varcharToBigintReturnExtraParameter")))
            .build();
    // The ambiguity is only detected when the choices are bound.
    function.specialize(BOUND_VARIABLES, 1, FUNCTION_AND_TYPE_MANAGER);
}
/**
 * Returns the {@link DdlResult} recorded for a rename-table statement,
 * or {@code null} if none has been set.
 */
public DdlResult getRenameTableResult() {
    return renameTableResult;
}
@Test public void getRenameTableResultOutputNull() { // Arrange final DdlResult objectUnderTest = new DdlResult(); // Act final DdlResult actual = objectUnderTest.getRenameTableResult(); // Assert result Assert.assertNull(actual); }
// Convenience overload: extracts the repository object from the dialog operation
// and delegates to the RepositoryElementMetaInterface variant.
// NOTE(review): assumes the operation's repository object implements
// RepositoryElementMetaInterface — the cast throws ClassCastException otherwise.
String getRepositoryFilePath( FileDialogOperation fileDialogOperation ) {
  return getRepositoryFilePath( (RepositoryElementMetaInterface) fileDialogOperation.getRepositoryObject() );
}
@Test
public void testGetRepositoryFilePath() {
  // Mock a repository object living under /home/devuser/files named food.txt.
  RepositoryDirectoryInterface repositoryDirectory = mock( RepositoryDirectoryInterface.class );
  when( repositoryDirectory.getPath() ).thenReturn( "/home/devuser/files" );
  RepositoryObject repositoryObject = mock( RepositoryObject.class );
  when( repositoryObject.getRepositoryDirectory() ).thenReturn( repositoryDirectory );
  when( repositoryObject.getName() ).thenReturn( "food.txt" );
  FileDialogOperation fileDialogOperation = createFileDialogOperation();
  fileDialogOperation.setRepositoryObject( repositoryObject );
  // Normalize Windows separators so the assertion is platform-independent.
  assertEquals( "/home/devuser/files/food.txt",
      testInstance.getRepositoryFilePath( fileDialogOperation ).replace( '\\', '/' ) );
}
PartitionRegistration getPartition(Uuid topicId, int partitionId) {
    // Unknown topic -> null; otherwise defer to the topic's partition map,
    // which itself yields null for an unknown partition id.
    TopicControlInfo topicInfo = topics.get(topicId);
    return (topicInfo == null) ? null : topicInfo.parts.get(partitionId);
}
@Test
public void testEligibleLeaderReplicas_BrokerFence() {
    // Exercise ELR (eligible leader replicas) bookkeeping as brokers are
    // fenced and unfenced with min.insync.replicas = 3.
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().setIsElrEnabled(true).build();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    ctx.registerBrokers(0, 1, 2, 3);
    ctx.unfenceBrokers(0, 1, 2, 3);
    CreatableTopicResult createTopicResult = ctx.createTestTopic("foo",
        new int[][] {new int[] {0, 1, 2, 3}});

    TopicIdPartition topicIdPartition = new TopicIdPartition(createTopicResult.topicId(), 0);
    assertEquals(OptionalInt.of(0), ctx.currentLeader(topicIdPartition));
    ctx.alterTopicConfig("foo", TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3");

    // Fencing brokers 2 and 3: broker 3 lands in the ELR; lastKnownElr stays empty.
    ctx.fenceBrokers(Utils.mkSet(2, 3));

    PartitionRegistration partition = replicationControl.getPartition(topicIdPartition.topicId(),
        topicIdPartition.partitionId());
    assertArrayEquals(new int[]{3}, partition.elr, partition.toString());
    assertArrayEquals(new int[]{}, partition.lastKnownElr, partition.toString());

    // Additionally fencing broker 1 adds it to the ELR alongside 3.
    ctx.fenceBrokers(Utils.mkSet(1, 2, 3));

    partition = replicationControl.getPartition(topicIdPartition.topicId(), topicIdPartition.partitionId());
    assertArrayEquals(new int[]{1, 3}, partition.elr, partition.toString());
    assertArrayEquals(new int[]{}, partition.lastKnownElr, partition.toString());

    // Unfencing everyone does not clear the ELR by itself.
    ctx.unfenceBrokers(0, 1, 2, 3);
    partition = replicationControl.getPartition(topicIdPartition.topicId(), topicIdPartition.partitionId());
    assertArrayEquals(new int[]{1, 3}, partition.elr, partition.toString());
    assertArrayEquals(new int[]{}, partition.lastKnownElr, partition.toString());
}
/**
 * Renders this WAL position in PostgreSQL's textual LSN form (e.g. {@code 0/64}).
 */
@Override
public String toString() {
    return logSequenceNumber.asString();
}
@Test
void assertToString() {
    // LSN 100 renders as hexadecimal "0/64" in PostgreSQL's textual form.
    final WALPosition position =
            new WALPosition(new PostgreSQLLogSequenceNumber(LogSequenceNumber.valueOf(100L)));
    assertThat(position.toString(), is("0/64"));
}
/**
 * Builds a {@code StreamingPCollectionViewWriterParDoFn} from the cloud spec.
 *
 * <p>Requires a streaming-mode step context and a {@code FullWindowedValueCoder}
 * in the spec's encoding; the coder is split into its value and window parts
 * for the ParDoFn, and the side input id is read from the spec.
 */
@Override
public ParDoFn create(
    PipelineOptions options,
    CloudObject cloudUserFn,
    List<SideInputInfo> sideInputInfos,
    TupleTag<?> mainOutputTag,
    Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices,
    DataflowExecutionContext<?> executionContext,
    DataflowOperationContext operationContext)
    throws Exception {
  DataflowStepContext stepContext = executionContext.getStepContext(operationContext);
  // This ParDoFn only works in streaming mode; fail fast otherwise.
  checkArgument(
      stepContext instanceof StreamingModeExecutionContext.StreamingModeStepContext,
      "stepContext must be a StreamingModeStepContext to use StreamingPCollectionViewWriterFn");

  // Decode the coder declared in the spec's ENCODING property.
  Coder<?> coder =
      CloudObjects.coderFromCloudObject(
          CloudObject.fromSpec(Structs.getObject(cloudUserFn, PropertyNames.ENCODING)));
  checkState(
      coder instanceof FullWindowedValueCoder,
      "Expected to received an instanceof an %s but got %s",
      FullWindowedValueCoder.class.getSimpleName(),
      coder);
  FullWindowedValueCoder<?> windowedValueCoder = (FullWindowedValueCoder<?>) coder;
  return new StreamingPCollectionViewWriterParDoFn(
      (StreamingModeExecutionContext.StreamingModeStepContext) stepContext,
      new TupleTag<>(Structs.getString(cloudUserFn, WorkerPropertyNames.SIDE_INPUT_ID)),
      (Coder) windowedValueCoder.getValueCoder(),
      (Coder) windowedValueCoder.getWindowCoder());
}
@Test
public void testConstruction() throws Exception {
  // A streaming-mode step context is required by the factory's checkArgument.
  DataflowOperationContext mockOperationContext = Mockito.mock(DataflowOperationContext.class);
  DataflowExecutionContext mockExecutionContext = Mockito.mock(DataflowExecutionContext.class);
  DataflowStepContext mockStepContext =
      Mockito.mock(StreamingModeExecutionContext.StepContext.class);
  when(mockExecutionContext.getStepContext(mockOperationContext)).thenReturn(mockStepContext);

  // The spec must carry a FullWindowedValueCoder in its ENCODING property.
  CloudObject coder =
      CloudObjects.asCloudObject(
          WindowedValue.getFullCoder(BigEndianIntegerCoder.of(), GlobalWindow.Coder.INSTANCE),
          /*sdkComponents=*/ null);
  ParDoFn parDoFn =
      new StreamingPCollectionViewWriterDoFnFactory()
          .create(
              null /* pipeline options */,
              CloudObject.fromSpec(
                  ImmutableMap.of(
                      PropertyNames.OBJECT_TYPE_NAME,
                      "StreamingPCollectionViewWriterDoFn",
                      PropertyNames.ENCODING,
                      coder,
                      WorkerPropertyNames.SIDE_INPUT_ID,
                      "test-side-input-id")),
              null /* side input infos */,
              null /* main output tag */,
              null /* output tag to receiver index */,
              mockExecutionContext,
              mockOperationContext);
  assertThat(parDoFn, instanceOf(StreamingPCollectionViewWriterParDoFn.class));
}
/**
 * Returns the state cell for {@code address} within {@code namespace},
 * delegating to the per-work-item state table with a null state context.
 */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
  return workItemState.get(namespace, address, StateContexts.nullContext());
}
@Test
public void testMapAddClearBeforeGet() throws Exception {
  StateTag<MapState<String, Integer>> addr =
      StateTags.map("map", StringUtf8Coder.of(), VarIntCoder.of());
  MapState<String, Integer> mapState = underTest.state(NAMESPACE, addr);

  final String tag = "tag";
  // Back the prefix read with a settable future so the backing-store value
  // ("tag" -> 1) arrives after the read has been registered.
  SettableFuture<Iterable<Map.Entry<ByteString, Integer>>> prefixFuture = SettableFuture.create();
  when(mockReader.valuePrefixFuture(
          protoKeyFromUserKey(null, StringUtf8Coder.of()), STATE_FAMILY, VarIntCoder.of()))
      .thenReturn(prefixFuture);

  // Register the read before clearing so it snapshots pre-clear contents.
  ReadableState<Integer> result = mapState.get("tag");
  result = result.readLater();
  waitAndSet(
      prefixFuture,
      ImmutableList.of(
          new AbstractMap.SimpleEntry<>(protoKeyFromUserKey(tag, StringUtf8Coder.of()), 1)),
      50);
  assertFalse(mapState.isEmpty().read());
  // clear() wipes both persisted and pending entries.
  mapState.clear();
  assertTrue(mapState.isEmpty().read());
  assertNull(mapState.get("tag").read());
  // A put after clear makes the map non-empty again.
  mapState.put("tag", 2);
  assertFalse(mapState.isEmpty().read());
  // The read registered before the clear/put observes the updated value.
  assertEquals(2, (int) result.read());
}
@Override
public void deleteObject(String accountName, ObjectType objectType, String objectKey) {
    // Route the delete to the repository owning this object type.
    // Note: accountName is intentionally unused by this implementation.
    if (objectType.equals(ObjectType.CANARY_RESULT_ARCHIVE)) {
        sqlCanaryArchiveRepo.deleteById(objectKey);
    } else if (objectType.equals(ObjectType.CANARY_CONFIG)) {
        sqlCanaryConfigRepo.deleteById(objectKey);
    } else if (objectType.equals(ObjectType.METRIC_SET_PAIR_LIST)) {
        sqlMetricSetPairsRepo.deleteById(objectKey);
    } else if (objectType.equals(ObjectType.METRIC_SET_LIST)) {
        sqlMetricSetsRepo.deleteById(objectKey);
    } else {
        throw new IllegalArgumentException("Unsupported object type: " + objectType);
    }
}
@Test
public void testDeleteObjectWhenCanaryArchive() {
    // Given a random account and object key.
    final String accountName = UUID.randomUUID().toString();
    final String objectKey = UUID.randomUUID().toString();

    // When deleting a canary result archive.
    sqlStorageService.deleteObject(accountName, ObjectType.CANARY_RESULT_ARCHIVE, objectKey);

    // Then the delete is routed to the archive repository.
    verify(sqlCanaryArchiveRepo).deleteById(objectKey);
}
/**
 * Parses a CGM Specific Ops Control Point response packet.
 *
 * <p>Layout: op code (UINT8) + operand (size depends on op code) + optional
 * CRC16 (2 bytes, little-endian). Malformed packets are routed to
 * {@code onInvalidDataReceived}; a CRC mismatch is reported via
 * {@code onCGMSpecificOpsResponseReceivedWithCrcError}.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    // Minimum packet: op code + 1-byte operand.
    if (data.size() < 2) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Read the Op Code
    final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);

    // Estimate the expected operand size based on the Op Code
    int expectedOperandSize;
    switch (opCode) {
        case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE ->
                // UINT8
                expectedOperandSize = 1;
        case OP_CODE_CALIBRATION_VALUE_RESPONSE ->
                // Calibration Value
                expectedOperandSize = 10;
        case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE,
             OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE,
             OP_CODE_HYPO_ALERT_LEVEL_RESPONSE,
             OP_CODE_HYPER_ALERT_LEVEL_RESPONSE,
             OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE,
             OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
                // SFLOAT
                expectedOperandSize = 2;
        case OP_CODE_RESPONSE_CODE ->
                // Request Op Code (UINT8), Response Code Value (UINT8)
                expectedOperandSize = 2;
        default -> {
            // Unknown op code: report and bail out.
            onInvalidDataReceived(device, data);
            return;
        }
    }

    // Verify packet length: either op code + operand, or the same plus a 2-byte CRC.
    if (data.size() != 1 + expectedOperandSize && data.size() != 1 + expectedOperandSize + 2) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Verify CRC if present
    final boolean crcPresent = data.size() == 1 + expectedOperandSize + 2; // opCode + expected operand + CRC
    if (crcPresent) {
        // CRC is computed over op code + operand and appended little-endian.
        final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 1 + expectedOperandSize);
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 1 + expectedOperandSize);
        if (expectedCrc != actualCrc) {
            onCGMSpecificOpsResponseReceivedWithCrcError(device, data);
            return;
        }
    }

    // Dispatch structured responses (these return directly).
    switch (opCode) {
        case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> {
            final int interval = data.getIntValue(Data.FORMAT_UINT8, 1);
            onContinuousGlucoseCommunicationIntervalReceived(device, interval, crcPresent);
            return;
        }
        case OP_CODE_CALIBRATION_VALUE_RESPONSE -> {
            // Operand layout: SFLOAT concentration (1-2), UINT16 time (3-4),
            // packed type/location nibbles (5), UINT16 next time (6-7),
            // UINT16 record number (8-9), UINT8 status (10).
            final float glucoseConcentrationOfCalibration = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
            final int calibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 3);
            final int calibrationTypeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 5);
            @SuppressLint("WrongConstant")
            final int calibrationType = calibrationTypeAndSampleLocation & 0x0F;
            final int calibrationSampleLocation = calibrationTypeAndSampleLocation >> 4;
            final int nextCalibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 6);
            final int calibrationDataRecordNumber = data.getIntValue(Data.FORMAT_UINT16_LE, 8);
            final int calibrationStatus = data.getIntValue(Data.FORMAT_UINT8, 10);
            onContinuousGlucoseCalibrationValueReceived(device, glucoseConcentrationOfCalibration,
                    calibrationTime, nextCalibrationTime, calibrationType, calibrationSampleLocation,
                    calibrationDataRecordNumber, new CGMCalibrationStatus(calibrationStatus), crcPresent);
            return;
        }
        case OP_CODE_RESPONSE_CODE -> {
            final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); // ignore
            final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 2);
            if (responseCode == CGM_RESPONSE_SUCCESS) {
                onCGMSpecificOpsOperationCompleted(device, requestCode, crcPresent);
            } else {
                onCGMSpecificOpsOperationError(device, requestCode, responseCode, crcPresent);
            }
            return;
        }
    }

    // All remaining op codes carry a single SFLOAT alert level. Read SFLOAT value.
    final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
    switch (opCode) {
        case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucosePatientHighAlertReceived(device, value, crcPresent);
        case OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucosePatientLowAlertReceived(device, value, crcPresent);
        case OP_CODE_HYPO_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseHypoAlertReceived(device, value, crcPresent);
        case OP_CODE_HYPER_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseHyperAlertReceived(device, value, crcPresent);
        case OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseRateOfDecreaseAlertReceived(device, value, crcPresent);
        case OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseRateOfIncreaseAlertReceived(device, value, crcPresent);
    }
}
@Test
public void onContinuousGlucoseCommunicationIntervalReceived_secured() {
    // Packet: op code 3 (communication interval response), interval 10,
    // followed by two trailing CRC bytes (little-endian) — presumably the
    // CRC16-MCRF4XX of the first two bytes, since the secured flag is expected.
    final Data data = new Data(new byte[] { 3, 10, (byte) 0x8A, (byte) 0x75});
    callback.onDataReceived(null, data);
    assertEquals("Interval", 10, interval);
    assertTrue(secured);
}
/**
 * Returns a {@code Values} transform producing the given elements, with both
 * optional attributes left absent and the trailing boolean flag set to false.
 */
public static <T> Values<T> of(Iterable<T> elems) {
  return new Values<>(elems, Optional.absent(), Optional.absent(), false);
}
@Test
public void testCreateExplicitSchema() {
  // Supplying an explicit schema plus to-row/from-row converters should make
  // the output PCollection use a SchemaCoder.
  PCollection<String> out =
      p.apply(
          Create.of("a", "b", "c", "d")
              .withSchema(
                  STRING_SCHEMA,
                  TypeDescriptors.strings(),
                  s -> Row.withSchema(STRING_SCHEMA).addValue(s).build(),
                  r -> r.getString("field")));
  assertThat(out.getCoder(), instanceOf(SchemaCoder.class));
}
/**
 * Creates a key with the given material on one of the load-balanced KMS
 * providers, starting at the provider selected by {@code nextIdx()}.
 *
 * NOTE(review): the trailing {@code false} flag passed to doOp is undocumented
 * here — confirm its semantics against doOp's declaration.
 */
@Override
public KeyVersion createKey(final String name, final byte[] material,
    final Options options) throws IOException {
  return doOp(new ProviderCallable<KeyVersion>() {
    @Override
    public KeyVersion call(KMSClientProvider provider) throws IOException {
      return provider.createKey(name, material, options);
    }
  }, nextIdx(), false);
}
@Test
public void testClientRetriesWithAccessControlException() throws Exception {
  // An AccessControlException must NOT be retried: it should surface from the
  // first provider without ever touching the remaining ones.
  Configuration conf = new Configuration();
  conf.setInt(
      CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
  KMSClientProvider p1 = mock(KMSClientProvider.class);
  when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
      .thenThrow(new AccessControlException("p1"));
  KMSClientProvider p2 = mock(KMSClientProvider.class);
  when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
      .thenThrow(new IOException("p2"));
  KMSClientProvider p3 = mock(KMSClientProvider.class);
  when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
      .thenThrow(new IOException("p3"));

  when(p1.getKMSUrl()).thenReturn("p1");
  when(p2.getKMSUrl()).thenReturn("p2");
  when(p3.getKMSUrl()).thenReturn("p3");
  LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
      new KMSClientProvider[] {p1, p2, p3}, 0, conf);
  try {
    kp.createKey("test3", new Options(conf));
    fail("Should fail because provider p1 threw an AccessControlException");
  } catch (Exception e) {
    assertTrue(e instanceof AccessControlException);
  }
  // p1 was tried exactly once; p2 and p3 were never consulted.
  verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
          Mockito.any(Options.class));
  verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
          Mockito.any(Options.class));
  verify(p3, Mockito.never()).createKey(Mockito.eq("test3"),
          Mockito.any(Options.class));
}
/**
 * Returns the element at {@code position}, or {@code null} when the index is
 * negative or beyond the end of the array (never throws for bad indices).
 */
public static <T> T getItemAtPositionOrNull(T[] array, int position) {
    // Guard against out-of-range indices instead of letting the JVM throw.
    if (position < 0 || position >= array.length) {
        return null;
    }
    return array[position];
}
@Test
public void getItemAtPositionOrNull_whenNegative_thenReturnNull() {
    // A negative index must yield null rather than throwing.
    final Object[] src = { new Object() };
    assertNull(ArrayUtils.getItemAtPositionOrNull(src, -1));
}
/**
 * Applies the forward-index/dictionary operations computed for each column.
 *
 * <p>For each operation the method also asserts the resulting index state
 * (dictionary present/absent, forward index created), throwing
 * {@link IllegalStateException} if an invariant is violated.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    // Nothing to do for this segment.
    return;
  }
  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          // Rebuild the forward index; the dictionary state must match the
          // column's dictionary/raw nature afterwards.
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          if (columnMetadata.hasDictionary()) {
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s",
                  column));
            }
          } else {
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          // If the column's forward index is also being disabled, only drop the
          // dictionary; otherwise convert to a raw (no-dictionary) forward index.
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
                  _fieldIndexConfigs);
          if (newForwardIndexDisabledColumns.contains(column)) {
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
/**
 * Re-enables the forward index for a column that had it disabled without an inverted index,
 * then verifies the on-disk index map and column metadata are unchanged for that column.
 */
@Test
public void testEnableForwardIndexForInvertedIndexDisabledColumn()
    throws Exception {
  // Build the set of forward-index-disabled columns, then drop the column under test from it
  // so the handler treats it as newly re-enabled.
  Set<String> forwardIndexDisabledColumns = new HashSet<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS);
  forwardIndexDisabledColumns.addAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
  SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
  SegmentDirectory segmentLocalFSDirectory =
      new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
  SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
  IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  forwardIndexDisabledColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  indexLoadingConfig.setForwardIndexDisabledColumns(forwardIndexDisabledColumns);
  // Inverted-index columns exclude raw columns and the two special-cased columns.
  Set<String> invertedIndexColumns = new HashSet<>(forwardIndexDisabledColumns);
  invertedIndexColumns.removeAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
  invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
  indexLoadingConfig.setInvertedIndexColumns(invertedIndexColumns);
  validateIndexMap(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX, true, true);
  validateIndexesForForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  ForwardIndexHandler fwdIndexHandler =
      new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  fwdIndexHandler.updateIndices(writer);
  fwdIndexHandler.postUpdateIndicesCleanup(writer);
  // Tear down before validation, because columns.psf and index map cleanup happens at
  // segmentDirectory.close().
  segmentLocalFSDirectory.close();
  // Validate nothing has changed
  validateIndexMap(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX, true, true);
  validateIndexesForForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  // In column metadata, nothing should change.
  ColumnMetadata metadata =
      existingSegmentMetadata.getColumnMetadataFor(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  validateMetadataProperties(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX, metadata.hasDictionary(),
      metadata.getColumnMaxLength(), metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(),
      metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(),
      metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(),
      false);
}
public void close(final boolean closeQueries) { primaryContext.getQueryRegistry().close(closeQueries); try { cleanupService.stopAsync().awaitTerminated( this.primaryContext.getKsqlConfig() .getLong(KsqlConfig.KSQL_QUERY_CLEANUP_SHUTDOWN_TIMEOUT_MS), TimeUnit.MILLISECONDS); } catch (final TimeoutException e) { log.warn("Timed out while closing cleanup service. " + "External resources for the following applications may be orphaned: {}", cleanupService.pendingApplicationIds() ); } engineMetrics.close(); aggregateMetricsCollector.shutdown(); }
@Test public void shouldCleanUpInternalTopicsOnClose() { // Given: setupKsqlEngineWithSharedRuntimeDisabled(); final QueryMetadata query = KsqlEngineTestUtil.executeQuery( serviceContext, ksqlEngine, "select * from test1 EMIT CHANGES;", ksqlConfig, Collections.emptyMap() ); query.start(); // When: query.close(); // Then: awaitCleanupComplete(); verify(topicClient, times(2)).deleteInternalTopics(query.getQueryApplicationId()); }
public Collection<Value> getRange(Key first, Key last) { List<Value> values = new ArrayList(); // Return the values of the entries found in cache for (Map.Entry<Key, EntryWrapper<Key, Value>> entry : entries.subMap(first, true, last, true).entrySet()) { Value value = getValue(entry.getKey(), entry.getValue()); if (value != null) { values.add(value); } } return values; }
@Test public void getRange() { RangeCache<Integer, RefString> cache = new RangeCache<>(); cache.put(0, new RefString("0")); cache.put(1, new RefString("1")); cache.put(3, new RefString("3")); cache.put(5, new RefString("5")); assertEquals(cache.getRange(1, 8), Lists.newArrayList(new RefString("1"), new RefString("3"), new RefString("5"))); cache.put(8, new RefString("8")); assertEquals(cache.getRange(1, 8), Lists.newArrayList(new RefString("1"), new RefString("3"), new RefString("5"), new RefString("8"))); cache.clear(); assertEquals(cache.getSize(), 0); assertEquals(cache.getNumberOfEntries(), 0); }
/**
 * Builds the JVM memory gauge set: totals (heap + non-heap), per-space
 * init/used/max/committed/usage, and per-memory-pool statistics.
 * Returned map is unmodifiable; each gauge reads the MXBeans lazily on access.
 */
@Override
public Map<String, Metric> getMetrics() {
  final Map<String, Metric> gauges = new HashMap<>();

  gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit()
      + mxBean.getNonHeapMemoryUsage().getInit());
  gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed()
      + mxBean.getNonHeapMemoryUsage().getUsed());
  // NOTE(review): only the non-heap max is checked for -1 (undefined); if the heap max were
  // also -1 the sum would be meaningless — confirm this matches upstream intent.
  gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1
      ? -1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
  gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted()
      + mxBean.getNonHeapMemoryUsage().getCommitted());

  gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
  gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
  gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
  gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
  gauges.put("heap.usage", new RatioGauge() {
    @Override
    protected Ratio getRatio() {
      final MemoryUsage usage = mxBean.getHeapMemoryUsage();
      return Ratio.of(usage.getUsed(), usage.getMax());
    }
  });

  gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
  gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
  gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
  gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
  gauges.put("non-heap.usage", new RatioGauge() {
    @Override
    protected Ratio getRatio() {
      final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
      // When max is undefined (-1), fall back to committed as the denominator.
      return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
    }
  });

  for (final MemoryPoolMXBean pool : memoryPools) {
    // Pool names may contain spaces (e.g. "PS Eden Space"); normalize to dashes for metric names.
    final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
    gauges.put(name(poolName, "usage"), new RatioGauge() {
      @Override
      protected Ratio getRatio() {
        MemoryUsage usage = pool.getUsage();
        return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
      }
    });
    gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
    gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
    gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
    // Only register GC usage metrics if the memory pool supports usage statistics.
    if (pool.getCollectionUsage() != null) {
      gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () -> pool.getCollectionUsage().getUsed());
    }
    gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
  }

  return Collections.unmodifiableMap(gauges);
}
@Test public void hasAGaugeForWeirdMemoryPoolUsage() { final Gauge gauge = (Gauge) gauges.getMetrics().get("pools.Weird-Pool.usage"); assertThat(gauge.getValue()) .isEqualTo(3.0); }
@Override public Result apply(PathData item, int depth) throws IOException { String name = getPath(item).getName(); if (!caseSensitive) { name = StringUtils.toLowerCase(name); } if (globPattern.matches(name)) { return Result.PASS; } else { return Result.FAIL; } }
@Test public void applyNotMatch() throws IOException { setup("name"); PathData item = new PathData("/directory/path/notname", mockFs.getConf()); assertEquals(Result.FAIL, name.apply(item, -1)); }
@Override public synchronized void editSchedule() { updateConfigIfNeeded(); long startTs = clock.getTime(); CSQueue root = scheduler.getRootQueue(); Resource clusterResources = Resources.clone(scheduler.getClusterResource()); containerBasedPreemptOrKill(root, clusterResources); if (LOG.isDebugEnabled()) { LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms."); } }
@Test public void testPreemptSkippedAMContainers() { int[][] qData = new int[][] { // / A B { 100, 10, 90 }, // abs { 100, 100, 100 }, // maxcap { 100, 100, 0 }, // used { 70, 20, 90 }, // pending { 0, 0, 0 }, // reserved { 5, 4, 1 }, // apps { -1, 5, 5 }, // req granularity { 2, 0, 0 }, // subqueues }; setAMContainer = true; ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); policy.editSchedule(); // All 5 containers of appD will be preempted including AM container. verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appD))); // All 5 containers of appC will be preempted including AM container. verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appC))); // By skipping AM Container, all other 4 containers of appB will be // preempted verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appB))); // By skipping AM Container, all other 4 containers of appA will be // preempted verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appA))); setAMContainer = false; }
public LegacyDeleteResult<T, K> removeById(K objectId) { return new LegacyDeleteResult<>(delegate.removeById(objectId)); }
@Test void removeById() { final var collection = jacksonCollection("simple", Simple.class); final var foo = new Simple("000000000000000000000001", "foo"); final var bar = new Simple("000000000000000000000002", "bar"); collection.insert(List.of(foo, bar)); assertThat(collection.removeById(foo.id()).getN()).isEqualTo(1); assertThat((Iterable<Simple>) collection.find()).containsExactly(bar); }
@Override public ConnectorMetadata getMetadata() { Optional<IHiveMetastore> hiveMetastore = Optional.empty(); if (isHiveOrGlueCatalogType()) { MetastoreType metastoreType = MetastoreType.get(catalogType); HiveMetaClient metaClient = HiveMetaClient.createHiveMetaClient(this.hdfsEnvironment, properties); hiveMetastore = Optional.of(new HiveMetastore(metaClient, catalogName, metastoreType)); // TODO caching hiveMetastore support } return new KuduMetadata(catalogName, hdfsEnvironment, kuduMaster, schemaEmulationEnabled, schemaEmulationPrefix, hiveMetastore); }
@Test public void testGetMetadataWithHiveCatalog() { Map<String, String> properties = new HashMap<>(); properties.put("kudu.master", "localhost:7051"); properties.put("kudu.catalog.type", "hive"); properties.put("hive.metastore.uris", "thrift://127.0.0.1:9083"); KuduConnector connector = new KuduConnector(new ConnectorContext("kudu_catalog", "kudu", properties)); ConnectorMetadata metadata = connector.getMetadata(); Assert.assertTrue(metadata instanceof KuduMetadata); }
public static List<?> convertToList(Schema schema, Object value) { return convertToArray(ARRAY_SELECTOR_SCHEMA, value); }
@Test public void shouldFailToConvertToListFromStringWithNonCommonElementTypeAndBlankElement() { assertThrows(DataException.class, () -> Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, 3, \"four\",,,]")); }
@Override public boolean shouldWait() { RingbufferContainer ringbuffer = getRingBufferContainerOrNull(); if (resultSet == null) { resultSet = new ReadResultSetImpl<>(minSize, maxSize, getNodeEngine().getSerializationService(), filter); sequence = startSequence; } if (ringbuffer == null) { return minSize > 0; } sequence = ringbuffer.clampReadSequenceToBounds(sequence); if (minSize == 0) { if (sequence < ringbuffer.tailSequence() + 1) { readMany(ringbuffer); } return false; } if (resultSet.isMinSizeReached()) { // enough items have been read, we are done. return false; } if (sequence == ringbuffer.tailSequence() + 1) { // the sequence is not readable return true; } readMany(ringbuffer); return !resultSet.isMinSizeReached(); }
@Test public void whenFilterProvidedAndAllItemsAvailable() { long startSequence = ringbuffer.tailSequence() + 1; IFunction<String, Boolean> filter = input -> input.startsWith("good"); ReadManyOperation op = getReadManyOperation(startSequence, 3, 3, filter); ringbuffer.add("bad1"); ringbuffer.add("good1"); ringbuffer.add("bad2"); ringbuffer.add("good2"); ringbuffer.add("bad3"); ringbuffer.add("good3"); assertFalse(op.shouldWait()); ReadResultSetImpl response = getReadResultSet(op); assertEquals(startSequence + 6, op.sequence); assertEquals(asList("good1", "good2", "good3"), response); assertEquals(6, response.getNextSequenceToReadFrom()); }
@Override public CompletableFuture<Void> cleanupAsync(JobID jobId) { mainThreadExecutor.assertRunningInMainThread(); CompletableFuture<Void> cleanupFuture = FutureUtils.completedVoidFuture(); for (CleanupWithLabel<T> cleanupWithLabel : prioritizedCleanup) { cleanupFuture = cleanupFuture.thenCompose( ignoredValue -> withRetry( jobId, cleanupWithLabel.getLabel(), cleanupWithLabel.getCleanup())); } return cleanupFuture.thenCompose( ignoredValue -> FutureUtils.completeAll( regularCleanup.stream() .map( cleanupWithLabel -> withRetry( jobId, cleanupWithLabel.getLabel(), cleanupWithLabel.getCleanup())) .collect(Collectors.toList()))); }
@Test void testHighestPriorityCleanupBlocksAllOtherCleanups() { final SingleCallCleanup highPriorityCleanup = SingleCallCleanup.withoutCompletionOnCleanup(); final SingleCallCleanup lowerThanHighPriorityCleanup = SingleCallCleanup.withCompletionOnCleanup(); final SingleCallCleanup noPriorityCleanup0 = SingleCallCleanup.withCompletionOnCleanup(); final SingleCallCleanup noPriorityCleanup1 = SingleCallCleanup.withCompletionOnCleanup(); final DefaultResourceCleaner<CleanupCallback> testInstance = createTestInstanceBuilder() .withPrioritizedCleanup("Prio #0", highPriorityCleanup) .withPrioritizedCleanup("Prio #1", lowerThanHighPriorityCleanup) .withRegularCleanup("Reg #0", noPriorityCleanup0) .withRegularCleanup("Reg #1", noPriorityCleanup1) .build(); final CompletableFuture<Void> overallCleanupResult = testInstance.cleanupAsync(JOB_ID); assertThat(highPriorityCleanup.isDone()).isFalse(); assertThat(lowerThanHighPriorityCleanup.isDone()).isFalse(); assertThat(noPriorityCleanup0.isDone()).isFalse(); assertThat(noPriorityCleanup1.isDone()).isFalse(); assertThat(overallCleanupResult.isDone()).isFalse(); highPriorityCleanup.completeCleanup(); assertThat(overallCleanupResult).isCompleted(); assertThat(highPriorityCleanup.isDone()).isTrue(); assertThat(lowerThanHighPriorityCleanup.isDone()).isTrue(); assertThat(noPriorityCleanup0.isDone()).isTrue(); assertThat(noPriorityCleanup1.isDone()).isTrue(); }
public <T extends AwsClientBuilder> void applyClientRegionConfiguration(T builder) { if (clientRegion != null) { builder.region(Region.of(clientRegion)); } }
@Test public void testApplyClientRegion() { Map<String, String> properties = Maps.newHashMap(); properties.put(AwsClientProperties.CLIENT_REGION, "us-east-1"); AwsClientProperties awsClientProperties = new AwsClientProperties(properties); S3ClientBuilder mockS3ClientBuilder = Mockito.mock(S3ClientBuilder.class); ArgumentCaptor<Region> regionArgumentCaptor = ArgumentCaptor.forClass(Region.class); awsClientProperties.applyClientRegionConfiguration(mockS3ClientBuilder); Mockito.verify(mockS3ClientBuilder).region(regionArgumentCaptor.capture()); Region region = regionArgumentCaptor.getValue(); assertThat(region.id()) .as("region parameter should match what is set in CLIENT_REGION") .isEqualTo("us-east-1"); }
public List<IssueDto> getStandardIssuesOnly(List<IssueDto> issues) { return filterTaintIssues(issues, false); }
@Test public void test_getStandardIssuesOnly() { List<IssueDto> standardIssues = underTest.getStandardIssuesOnly(getIssues()); assertThat(standardIssues).hasSize(3); assertThat(standardIssues.get(0).getKey()).isEqualTo("standardIssue1"); assertThat(standardIssues.get(1).getKey()).isEqualTo("standardIssue2"); assertThat(standardIssues.get(2).getKey()).isEqualTo("standardIssue3"); }
@KafkaClientInternalsDependant @VisibleForTesting Mono<Map<TopicPartition, Long>> listOffsetsUnsafe(Collection<TopicPartition> partitions, OffsetSpec offsetSpec) { if (partitions.isEmpty()) { return Mono.just(Map.of()); } Function<Collection<TopicPartition>, Mono<Map<TopicPartition, Long>>> call = parts -> { ListOffsetsResult r = client.listOffsets(parts.stream().collect(toMap(tp -> tp, tp -> offsetSpec))); Map<TopicPartition, KafkaFuture<ListOffsetsResultInfo>> perPartitionResults = new HashMap<>(); parts.forEach(p -> perPartitionResults.put(p, r.partitionResult(p))); return toMonoWithExceptionFilter(perPartitionResults, UnknownTopicOrPartitionException.class) .map(offsets -> offsets.entrySet().stream() // filtering partitions for which offsets were not found .filter(e -> e.getValue().offset() >= 0) .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset()))); }; return partitionCalls( partitions, 200, call, mapMerger() ); }
@Test void testListOffsetsUnsafe() { String topic = UUID.randomUUID().toString(); createTopics(new NewTopic(topic, 2, (short) 1)); // sending messages to have non-zero offsets for tp try (var producer = KafkaTestProducer.forKafka(kafka)) { producer.send(new ProducerRecord<>(topic, 1, "k", "v")); producer.send(new ProducerRecord<>(topic, 1, "k", "v")); } var requestedPartitions = List.of( new TopicPartition(topic, 0), new TopicPartition(topic, 1) ); StepVerifier.create(reactiveAdminClient.listOffsetsUnsafe(requestedPartitions, OffsetSpec.earliest())) .assertNext(offsets -> { assertThat(offsets) .hasSize(2) .containsEntry(new TopicPartition(topic, 0), 0L) .containsEntry(new TopicPartition(topic, 1), 0L); }) .verifyComplete(); StepVerifier.create(reactiveAdminClient.listOffsetsUnsafe(requestedPartitions, OffsetSpec.latest())) .assertNext(offsets -> { assertThat(offsets) .hasSize(2) .containsEntry(new TopicPartition(topic, 0), 0L) .containsEntry(new TopicPartition(topic, 1), 2L); }) .verifyComplete(); }
public static String format(String str, Object... args) { // TODO(chaokunyang) optimize performance. // TODO(chaokunyang) support `$xxx`. StringBuilder builder = new StringBuilder(str); if (args.length % 2 != 0) { throw new IllegalArgumentException( "args length must be multiple of 2, but get " + args.length); } Map<String, String> values = new HashMap<>(); for (int i = 0; i < args.length; i += 2) { values.put(args[i].toString(), args[i + 1].toString()); } for (Map.Entry<String, String> entry : values.entrySet()) { int start; String pattern = "${" + entry.getKey() + "}"; String value = entry.getValue(); // Replace every occurrence of %(key) with value while ((start = builder.indexOf(pattern)) != -1) { builder.replace(start, start + pattern.length(), value); } } return builder.toString(); }
@Test public void testFormat() { assertEquals(StringUtils.format("${a}, ${b}", "a", 1, "b", "abc"), "1, abc"); }
@Override public Promise<PooledConnection> acquire( EventLoop eventLoop, CurrentPassport passport, AtomicReference<? super InetAddress> selectedHostAddr) { if (draining) { throw new IllegalStateException("Attempt to acquire connection while draining"); } requestConnCounter.increment(); updateServerStatsOnAcquire(); Promise<PooledConnection> promise = eventLoop.newPromise(); // Try getting a connection from the pool. final PooledConnection conn = tryGettingFromConnectionPool(eventLoop); if (conn != null) { // There was a pooled connection available, so use this one. reusePooledConnection(passport, selectedHostAddr, conn, promise); } else { // connection pool empty, create new connection using client connection factory. tryMakingNewConnection(eventLoop, promise, passport, selectedHostAddr); } return promise; }
@Test void acquireNewConnection() throws InterruptedException, ExecutionException { CurrentPassport currentPassport = CurrentPassport.create(); Promise<PooledConnection> promise = pool.acquire(CLIENT_EVENT_LOOP, currentPassport, new AtomicReference<>()); PooledConnection connection = promise.sync().get(); assertEquals(1, requestConnCounter.count()); assertEquals(1, createNewConnCounter.count()); assertNotNull(currentPassport.findState(PassportState.ORIGIN_CH_CONNECTING)); assertNotNull(currentPassport.findState(PassportState.ORIGIN_CH_CONNECTED)); assertEquals(1, createConnSucceededCounter.count()); assertEquals(1, connsInUse.get()); // check state on PooledConnection - not all thread safe CLIENT_EVENT_LOOP .submit(() -> { checkChannelState(connection, currentPassport, 1); }) .sync(); }
public int remap(int var, int size) { if ((var & REMAP_FLAG) != 0) { return unmask(var); } int offset = var - argsSize; if (offset < 0) { // self projection for method arguments return var; } if (offset >= mapping.length) { mapping = Arrays.copyOf(mapping, Math.max(mapping.length * 2, offset + 1)); } int mappedVar = mapping[offset]; int unmasked = unmask(mappedVar); boolean isRemapped = ((mappedVar & REMAP_FLAG) != 0); if (size == 2) { if ((mappedVar & DOUBLE_SLOT_FLAG) == 0) { // no double slot mapping over an int slot; // must re-map unless the int slot is the last used one or there is a free double-ext slot isRemapped = false; } } else { // size == 1 if ((mappedVar & DOUBLE_SLOT_FLAG_2) != 0) { // no mapping over a previously 2-slot value isRemapped = false; } else if ((mappedVar & DOUBLE_SLOT_FLAG) != 0) { // the previously second part of the double slot is free to reuse mapping[unmasked + 1] = (unmasked + 1) | REMAP_FLAG; } } if (!isRemapped) { mappedVar = remapVar(newVarIdxInternal(size), size); setMapping(offset, mappedVar, size); } unmasked = unmask(mappedVar); // adjust the mapping pointer if remapping with variable occupying 2 slots nextMappedVar = Math.max(unmasked + size, nextMappedVar); return unmasked; }
@Test public void remapMethodArguments() { int numArgs = 4; VariableMapper mapper = new VariableMapper(numArgs); assertEquals(0, mapper.remap(0, 1)); assertEquals(1, mapper.remap(1, 2)); assertEquals(3, mapper.remap(3, 1)); }
@Override public ImmutableSet<String> attributes(File file) { return userDefinedAttributes(file); }
@Test public void testInitialAttributes() { // no initial attributes assertThat(ImmutableList.copyOf(file.getAttributeKeys())).isEmpty(); assertThat(provider.attributes(file)).isEmpty(); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
@Test public void shouldFindVarargsManyOdd() { // Given: givenFunctions( function(EXPECTED, 2, INT, INT, STRING_VARARGS, STRING, INT) ); // When: final KsqlScalarFunction fun = udfIndex .getFunction(ImmutableList.of( SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.INTEGER) )); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
public Result check(IndexSetTemplate indexSetTemplate) { return indexSetTemplateRequirements.stream() .sorted(Comparator.comparing(IndexSetTemplateRequirement::priority)) .map(indexSetTemplateRequirement -> indexSetTemplateRequirement.check(indexSetTemplate)) .filter(result -> !result.fulfilled()) .findFirst() .orElse(new Result(true, "")); }
@Test void testRequirement2NotFulfilled() { Result expectedResult = new Result(false, "r2"); when(requirement1.check(any())).thenReturn(new Result(true, "")); when(requirement2.check(any())).thenReturn(expectedResult); Result result = underTest.check(indexSetTemplate); assertThat(result).isEqualTo(expectedResult); requirements.verify(requirement1).check(any()); requirements.verify(requirement2).check(any()); }
@Override public synchronized int read() throws IOException { checkNotClosed(); if (finished) { return -1; } file.readLock().lock(); try { int b = file.read(pos++); // it's ok for pos to go beyond size() if (b == -1) { finished = true; } else { file.setLastAccessTime(fileSystemState.now()); } return b; } finally { file.readLock().unlock(); } }
@Test public void testRead_partialArray() throws IOException { JimfsInputStream in = newInputStream(1, 2, 3, 4, 5, 6, 7, 8); byte[] bytes = new byte[12]; assertThat(in.read(bytes, 0, 8)).isEqualTo(8); assertArrayEquals(bytes(1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0), bytes); assertEmpty(in); }
@Override public PageResult<NotifyMessageDO> getNotifyMessagePage(NotifyMessagePageReqVO pageReqVO) { return notifyMessageMapper.selectPage(pageReqVO); }
@Test public void testGetNotifyMessagePage() { // mock 数据 NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // 等会查询到 o.setUserId(1L); o.setUserType(UserTypeEnum.ADMIN.getValue()); o.setTemplateCode("test_01"); o.setTemplateType(10); o.setCreateTime(buildTime(2022, 1, 2)); o.setTemplateParams(randomTemplateParams()); }); notifyMessageMapper.insert(dbNotifyMessage); // 测试 userId 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L))); // 测试 userType 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue()))); // 测试 templateCode 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setTemplateCode("test_11"))); // 测试 templateType 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setTemplateType(20))); // 测试 createTime 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setCreateTime(buildTime(2022, 2, 1)))); // 准备参数 NotifyMessagePageReqVO reqVO = new NotifyMessagePageReqVO(); reqVO.setUserId(1L); reqVO.setUserType(UserTypeEnum.ADMIN.getValue()); reqVO.setTemplateCode("est_01"); reqVO.setTemplateType(10); reqVO.setCreateTime(buildBetweenTime(2022, 1, 1, 2022, 1, 10)); // 调用 PageResult<NotifyMessageDO> pageResult = notifyMessageService.getNotifyMessagePage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbNotifyMessage, pageResult.getList().get(0)); }
public boolean matchesBeacon(Beacon beacon) { // All identifiers must match, or the corresponding region identifier must be null. for (int i = mIdentifiers.size(); --i >= 0; ) { final Identifier identifier = mIdentifiers.get(i); Identifier beaconIdentifier = null; if (i < beacon.mIdentifiers.size()) { beaconIdentifier = beacon.getIdentifier(i); } if ((beaconIdentifier == null && identifier != null) || (beaconIdentifier != null && identifier != null && !identifier.equals(beaconIdentifier))) { return false; } } if (mBluetoothAddress != null && !mBluetoothAddress.equalsIgnoreCase(beacon.mBluetoothAddress)) { return false; } return true; }
@Test public void testBeaconMatchesRegionWithSameIdentifier1And2() { Beacon beacon = new AltBeacon.Builder().setId1("1").setId2("2").setId3("3").setRssi(4) .setBeaconTypeCode(5).setTxPower(6).setBluetoothAddress("1:2:3:4:5:6").build(); Region region = new Region("myRegion", Identifier.parse("1"), Identifier.parse("2"), null); assertTrue("Beacon should match region with first two identifiers the same", region.matchesBeacon(beacon)); }
public static void register(Observer observer) { register(SubjectType.SPRING_CONTENT_REFRESHED.name(), observer); }
@Test public void testSubjectTypeNameRegister() { AbstractSubjectCenter.register(AbstractSubjectCenter.SubjectType.THREAD_POOL_DYNAMIC_REFRESH.name(), subjectNotifyListener); List<Observer> list = OBSERVERS_MAP.get(AbstractSubjectCenter.SubjectType.THREAD_POOL_DYNAMIC_REFRESH.name()); Assert.assertNotNull(list); Assert.assertEquals(1, list.size()); Assert.assertSame(subjectNotifyListener, list.get(0)); OBSERVERS_MAP.clear(); }
@Override public Mono<RemoveDeviceResponse> removeDevice(final RemoveDeviceRequest request) { if (request.getId() == Device.PRIMARY_ID) { throw Status.INVALID_ARGUMENT.withDescription("Cannot remove primary device").asRuntimeException(); } final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice(); if (authenticatedDevice.deviceId() != Device.PRIMARY_ID && request.getId() != authenticatedDevice.deviceId()) { throw Status.PERMISSION_DENIED .withDescription("Linked devices cannot remove devices other than themselves") .asRuntimeException(); } final byte deviceId = DeviceIdUtil.validate(request.getId()); return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier())) .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException)) .flatMap(account -> Mono.fromFuture(accountsManager.removeDevice(account, deviceId))) .thenReturn(RemoveDeviceResponse.newBuilder().build()); }
@Test void removeDevice() { final byte deviceId = 17; final RemoveDeviceResponse ignored = authenticatedServiceStub().removeDevice(RemoveDeviceRequest.newBuilder() .setId(deviceId) .build()); verify(accountsManager).removeDevice(authenticatedAccount, deviceId); }
public void updateSubscriptionGroupConfig(final SubscriptionGroupConfig config) { Map<String, String> newAttributes = request(config); Map<String, String> currentAttributes = current(config.getGroupName()); Map<String, String> finalAttributes = AttributeUtil.alterCurrentAttributes( this.subscriptionGroupTable.get(config.getGroupName()) == null, SubscriptionGroupAttributes.ALL, ImmutableMap.copyOf(currentAttributes), ImmutableMap.copyOf(newAttributes)); config.setAttributes(finalAttributes); SubscriptionGroupConfig old = putSubscriptionGroupConfig(config); if (old != null) { log.info("update subscription group config, old: {} new: {}", old, config); } else { log.info("create new subscription group, {}", config); } updateDataVersion(); this.persist(); }
@Test public void testUpdateAndCreateSubscriptionGroupInRocksdb() { if (notToBeExecuted()) { return; } group += System.currentTimeMillis(); updateSubscriptionGroupConfig(); }
public ConcurrentMap<String, String> getHostMapping() { return hostMapping; }
@Test public void uriHostMappingEmpty() { final TiConfiguration tiConf = TDBSourceOptions.getTiConfiguration("http://0.0.0.0:2347", "", new HashMap<>()); UriHostMapping uriHostMapping = (UriHostMapping) tiConf.getHostMapping(); assertEquals(uriHostMapping.getHostMapping(), null); }
public CompletableFuture<TopicExistsInfo> checkTopicExists(TopicName topic) { return pulsar.getBrokerService() .fetchPartitionedTopicMetadataAsync(TopicName.get(topic.toString())) .thenCompose(metadata -> { if (metadata.partitions > 0) { return CompletableFuture.completedFuture( TopicExistsInfo.newPartitionedTopicExists(metadata.partitions)); } return checkNonPartitionedTopicExists(topic) .thenApply(b -> b ? TopicExistsInfo.newNonPartitionedTopicExists() : TopicExistsInfo.newTopicNotExists()); }); }
@Test(dataProvider = "topicDomain") public void testCheckTopicExists(String topicDomain) throws Exception { String topic = topicDomain + "://prop/ns-abc/" + UUID.randomUUID(); admin.topics().createNonPartitionedTopic(topic); Awaitility.await().untilAsserted(() -> { assertTrue(pulsar.getNamespaceService().checkTopicExists(TopicName.get(topic)).get().isExists()); }); String partitionedTopic = topicDomain + "://prop/ns-abc/" + UUID.randomUUID(); admin.topics().createPartitionedTopic(partitionedTopic, 5); Awaitility.await().untilAsserted(() -> { assertTrue(pulsar.getNamespaceService().checkTopicExists(TopicName.get(partitionedTopic)).get().isExists()); assertTrue(pulsar.getNamespaceService() .checkTopicExists(TopicName.get(partitionedTopic + "-partition-2")).get().isExists()); }); }
void handleFinish(Resp response, Span span) { if (response == null) throw new NullPointerException("response == null"); if (span.isNoop()) return; if (response.error() != null) { span.error(response.error()); // Ensures MutableSpan.error() for SpanHandler } try { parseResponse(response, span); } catch (Throwable t) { propagateIfFatal(t); Platform.get().log("error parsing response {0}", response, t); } finally { long finishTimestamp = response.finishTimestamp(); if (finishTimestamp == 0L) { span.finish(); } else { span.finish(finishTimestamp); } } }
// Even when the response parser throws, the span must still be finished (finally-block contract).
@Test void handleFinish_finishedEvenIfParsingThrows() { when(span.isNoop()).thenReturn(false); doThrow(new RuntimeException()).when(responseParser).parse(response, context, spanCustomizer); handler.handleFinish(response, span); verify(span).finish(); }
/**
 * Returns the pairing threshold as a weighted sum of a fixed time window and the
 * configured pairing distance (converted from nautical miles to feet).
 *
 * The 7,000 ms constant comes from radar update cadence: hits normally arrive every
 * 13 seconds or less, so any two aircraft have hits within 6.5 seconds of each
 * other; 6,500 is rounded up to 7,000.
 */
public double pairingThreshold() {
    final double timeTermMs = 7_000 * timeCoef();
    final double distanceTermFt = trackPairingDistanceInNM() * Spherical.feetPerNM() * distCoef();
    return timeTermMs + distanceTermFt;
}
// With unit coefficients, the threshold must equal 7000 ms plus the 5 NM distance in feet.
@Test public void testDerivedPairThresholdReflectsDistanceInNM() { double TOLERANCE = 0.0001; PairingConfig noDistProps = new PairingConfig(timeWindow, 5, 1, 1); assertEquals( noDistProps.pairingThreshold(), 7000.0 + 5.0 * Spherical.feetPerNM(), TOLERANCE ); }
/** Sets the group this metadata operation targets. */
public void setGroup(String group) { this.group = group; }
// setGroup() must store exactly the value later returned by getGroup().
@Test
void testSetGroup() {
    final String expectedGroup = "default";
    metadataOperation.setGroup(expectedGroup);
    assertEquals(metadataOperation.getGroup(), expectedGroup);
}
/**
 * Sets the active farming contract (null clears it), persists it, and recomputes the
 * derived contract state (summary, crop state, completion time).
 */
public void setContract(@Nullable Produce contract) { this.contract = contract; setStoredContract(contract); handleContractState(); }
// With two growing cabbage patches, the contract must be IN_PROGRESS/GROWING and the
// completion time must be taken from the patch that finishes first.
@Test public void cabbageContractTwoCabbagesGrowing() { final long unixNow = Instant.now().getEpochSecond(); final long expected1 = unixNow + 60; final long expected2 = unixNow + 120; // Get the two allotment patches final FarmingPatch patch1 = farmingGuildPatches.get(Varbits.FARMING_4773); final FarmingPatch patch2 = farmingGuildPatches.get(Varbits.FARMING_4774); assertNotNull(patch1); assertNotNull(patch2); // Specify the two allotment patches when(farmingTracker.predictPatch(patch1)) .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.GROWING, expected1, 2, 3)); when(farmingTracker.predictPatch(patch2)) .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.GROWING, expected2, 1, 3)); farmingContractManager.setContract(Produce.CABBAGE); assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary()); assertEquals(CropState.GROWING, farmingContractManager.getContractCropState()); // Prefer closest estimated time assertEquals(expected1, farmingContractManager.getCompletionTime()); }
/**
 * Routes data to the given Camel URI via a reactive stream. Lazily creates (and caches,
 * keyed by URI) a bridge route "reactive-streams:&lt;uuid&gt; -> uri" the first time a URI is
 * used; subsequent calls reuse the same stream name.
 */
@Override public Publisher<Exchange> to(String uri, Object data) { String streamName = requestedUriToStream.computeIfAbsent(uri, camelUri -> { try { String uuid = context.getUuidGenerator().generateUuid(); RouteBuilder.addRoutes(context, rb -> rb.from("reactive-streams:" + uuid).to(camelUri)); return uuid; } catch (Exception e) { throw new IllegalStateException("Unable to create requested reactive stream from direct URI: " + uri, e); } }); return toStream(streamName, data); }
// Each emitted value must round-trip through the "bean:hello" endpoint and come back as
// "Hello <n>"; the latch guards against the async pipeline never completing.
@Test public void testToWithExchange() throws Exception { context.start(); AtomicInteger value = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(1); Flowable.just(1, 2, 3).flatMap(e -> crs.to("bean:hello", e)).map(Exchange::getMessage).map(e -> e.getBody(String.class)) .doOnNext(res -> assertEquals("Hello " + value.incrementAndGet(), res)).doOnNext(res -> latch.countDown()) .subscribe(); assertTrue(latch.await(2, TimeUnit.SECONDS)); }
/**
 * Returns whether the value is a valid GCP cloud label.
 * NOTE(review): uses Matcher.find() rather than matches(); this is only correct if
 * VALID_CLOUD_LABEL_PATTERN is anchored (^...$) — the tests (empty string rejected,
 * uppercase rejected) suggest it is, but confirm against the pattern definition.
 */
public static boolean isValidCloudLabel(String value) { Matcher m = VALID_CLOUD_LABEL_PATTERN.matcher(value); return m.find(); }
// Exhaustively checks the GCP label rules: 1..63 chars of lowercase letters, digits,
// underscores and hyphens are valid; empty, too-long, uppercase and special chars are not.
@Test @SuppressWarnings("LoopOverCharArray") public void testIsValidCloudLabel() { // A dataflow job ID. // Lowercase letters, numbers, underscores and hyphens are allowed. String testStr = "2020-06-29_15_26_09-12838749047888422749"; assertTrue(BigQueryIOMetadata.isValidCloudLabel(testStr)); // At least one character. testStr = "0"; assertTrue(BigQueryIOMetadata.isValidCloudLabel(testStr)); // Up to 63 characters. testStr = "0123456789abcdefghij0123456789abcdefghij0123456789abcdefghij012"; assertTrue(BigQueryIOMetadata.isValidCloudLabel(testStr)); // Lowercase letters allowed testStr = "abcdefghijklmnopqrstuvwxyz"; for (char testChar : testStr.toCharArray()) { assertTrue(BigQueryIOMetadata.isValidCloudLabel(String.valueOf(testChar))); } // Empty strings not allowed. testStr = ""; assertFalse(BigQueryIOMetadata.isValidCloudLabel(testStr)); // 64 or more characters not allowed. testStr = "0123456789abcdefghij0123456789abcdefghij0123456789abcdefghij0123"; assertFalse(BigQueryIOMetadata.isValidCloudLabel(testStr)); // Uppercase letters not allowed testStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; for (char testChar : testStr.toCharArray()) { assertFalse(BigQueryIOMetadata.isValidCloudLabel(String.valueOf(testChar))); } // Special characters besides hyphens are not allowed testStr = "!@#$%^&*()+=[{]};:\'\"\\|,<.>?/`~"; for (char testChar : testStr.toCharArray()) { assertFalse(BigQueryIOMetadata.isValidCloudLabel(String.valueOf(testChar))); } }
/**
 * Handles an instance-online event by (re)loading the compute node instance from
 * persistence and registering it with the compute node instance context.
 * Synchronized because events may arrive concurrently from the event bus.
 */
@Subscribe public synchronized void renew(final InstanceOnlineEvent event) { contextManager.getComputeNodeInstanceContext().addComputeNodeInstance(contextManager.getPersistServiceFacade() .getComputeNodePersistService().loadComputeNodeInstance(event.getInstanceMetaData())); }
// Online events add distinct instances; re-delivering an event for an already-known
// instance must not grow the cluster list (it is moved/kept, size stays 2).
@Test void assertRenewInstanceOnlineEvent() { InstanceMetaData instanceMetaData1 = new ProxyInstanceMetaData("foo_instance_3307", 3307); InstanceOnlineEvent instanceOnlineEvent1 = new InstanceOnlineEvent(instanceMetaData1); subscriber.renew(instanceOnlineEvent1); assertThat(contextManager.getComputeNodeInstanceContext().getAllClusterInstances().size(), is(1)); assertThat(((CopyOnWriteArrayList<ComputeNodeInstance>) contextManager.getComputeNodeInstanceContext().getAllClusterInstances()).get(0).getMetaData(), is(instanceMetaData1)); InstanceMetaData instanceMetaData2 = new ProxyInstanceMetaData("foo_instance_3308", 3308); InstanceOnlineEvent instanceOnlineEvent2 = new InstanceOnlineEvent(instanceMetaData2); subscriber.renew(instanceOnlineEvent2); assertThat(contextManager.getComputeNodeInstanceContext().getAllClusterInstances().size(), is(2)); assertThat(((CopyOnWriteArrayList<ComputeNodeInstance>) contextManager.getComputeNodeInstanceContext().getAllClusterInstances()).get(1).getMetaData(), is(instanceMetaData2)); subscriber.renew(instanceOnlineEvent1); assertThat(contextManager.getComputeNodeInstanceContext().getAllClusterInstances().size(), is(2)); assertThat(((CopyOnWriteArrayList<ComputeNodeInstance>) contextManager.getComputeNodeInstanceContext().getAllClusterInstances()).get(1).getMetaData(), is(instanceMetaData1)); }
/**
 * Test hook: returns the confirmed leader session ID for the given component, or null
 * when the component is not registered (or has no confirmed leadership). Guarded by the
 * service lock to get a consistent view of registry and confirmed leader information.
 */
@VisibleForTesting @Nullable public UUID getLeaderSessionID(String componentId) { synchronized (lock) { return leaderContenderRegistry.containsKey(componentId) ? confirmedLeaderInformation .forComponentIdOrEmpty(componentId) .getLeaderSessionID() : null; } }
// A leadership grant arriving after the leader election has been closed must be ignored:
// neither the service nor the contenders may observe a session ID.
@Test void testOnGrantLeadershipIsIgnoredAfterLeaderElectionClose() throws Exception { new Context() { { runTestWithSynchronousEventHandling( () -> { closeLeaderElectionInBothContexts(); grantLeadership(); applyToBothContenderContexts( ctx -> { assertThat( leaderElectionService.getLeaderSessionID( ctx.componentId)) .as( "The grant event shouldn't have been processed by the LeaderElectionService.") .isNull(); assertThat(ctx.contender.getLeaderSessionID()) .as( "The grant event shouldn't have been forwarded to the contender.") .isNull(); }); }); } }; }
public static String unescapeQuotedString(String string) { StringBuilder sb = new StringBuilder(string); for (int i = 0; i < sb.length(); i++) { if (sb.charAt(i) == '\\') { sb.deleteCharAt(i); if (i == sb.length()) { throw new IllegalArgumentException("Parse error" + string); } switch (sb.charAt(i)) { case 'n' -> sb.setCharAt(i, '\n'); case 'r' -> sb.setCharAt(i, '\r'); case 't' -> sb.setCharAt(i, '\t'); case 'f' -> sb.setCharAt(i, '\f'); case 'x' -> { if (i + 2 >= sb.length()) { throw new IllegalArgumentException("Could not parse hex value " + string); } sb.setCharAt(i, (char) Integer.parseInt(sb.substring(i + 1, i + 3), 16)); sb.delete(i + 1, i + 3); } case '\\' -> sb.setCharAt(i, '\\'); } } } if (sb.length() > 0 && (sb.charAt(0) == '"') && sb.charAt(sb.length() - 1) == '"') { sb.deleteCharAt(sb.length() - 1);//remove last quote if (sb.length() > 0) { sb.deleteCharAt(0); //remove first quote } } return sb.toString(); }
// A trailing dangling backslash must be rejected with IllegalArgumentException.
@Test void testUnescapedQuotedStringExceptions() { assertThrows(IllegalArgumentException.class, () -> StringNode.unescapeQuotedString("foo\\")); }
/** Renders the given AST back to SQL text, trimming trailing newlines from the output. */
public static String formatSql(final AstNode root) { final StringBuilder builder = new StringBuilder(); new Formatter(builder).process(root, 0); return StringUtils.stripEnd(builder.toString(), "\n"); }
// Round-trips a CSAS statement with a HOPPING window through parse + format and pins
// the exact multi-line output (including the implicit EMIT CHANGES).
@Test public void shouldFormatHoppingWindow() { // Given: final String statementString = "CREATE STREAM S AS SELECT ITEMID, COUNT(*) FROM ORDERS WINDOW HOPPING (SIZE 20 SECONDS, ADVANCE BY 5 SECONDS) GROUP BY ITEMID;"; final Statement statement = parseSingle(statementString); final String result = SqlFormatter.formatSql(statement); assertThat(result, is("CREATE STREAM S AS SELECT\n" + "  ITEMID,\n" + "  COUNT(*)\n" + "FROM ORDERS ORDERS\n" + "WINDOW HOPPING ( SIZE 20 SECONDS , ADVANCE BY 5 SECONDS ) \n" + "GROUP BY ITEMID\n" + "EMIT CHANGES")); }
/**
 * Returns true when the given return type is assignable to one of the supported
 * RxJava types registered in {@code rxSupportedTypes}.
 */
@SuppressWarnings("unchecked")
@Override
public boolean canHandleReturnType(Class returnType) {
    for (Class supportedType : rxSupportedTypes) {
        if (supportedType.isAssignableFrom(returnType)) {
            return true;
        }
    }
    return false;
}
// Flowable and Single must both be recognized as handleable RxJava return types.
@Test public void testCheckTypes() { assertThat(rxJava2CircuitBreakerAspectExt.canHandleReturnType(Flowable.class)).isTrue(); assertThat(rxJava2CircuitBreakerAspectExt.canHandleReturnType(Single.class)).isTrue(); }
/**
 * Deletes the extension store entry with the given name at the given version.
 * The version is written onto the loaded entity before deletion so optimistic locking
 * applies; completes empty when no entry exists. Returns the deleted entity.
 */
@Override public Mono<ExtensionStore> delete(String name, Long version) { return repository.findById(name) .flatMap(extensionStore -> { // reset the version extensionStore.setVersion(version); return repository.delete(extensionStore).thenReturn(extensionStore); }); }
// delete() must return the entity it removed when the repository lookup succeeds.
@Test void shouldDeleteSuccessfully() { var expectedExtension = new ExtensionStore("/registry/posts/hello-halo", "hello halo".getBytes(), 2L); when(repository.findById(anyString())).thenReturn(Mono.just(expectedExtension)); when(repository.delete(any())).thenReturn(Mono.empty()); var deletedExtension = client.delete("/registry/posts/hello-halo", 2L).block(); assertEquals(expectedExtension, deletedExtension); }
/** Returns true when this key contains no bytes. */
public boolean isEmpty() { return value.isEmpty(); }
// The canonical empty key is empty; a one-byte key is not.
@Test public void testIsEmpty() { assertTrue("[] is empty", ByteKey.EMPTY.isEmpty()); assertFalse("[00]", ByteKey.of(0).isEmpty()); }
/**
 * Sets the operation's path to the given file path when the FileObject is a regular file,
 * or to null otherwise (e.g. directories). Wraps VFS failures in a KettleException.
 */
void setPath( FileDialogOperation fileDialogOperation, FileObject fileObject, String filePath ) throws KettleException { try { fileDialogOperation.setPath( fileObject.isFile() ? filePath : null ); } catch ( FileSystemException fse ) { throw new KettleException( "failed to check isFile in setPath()", fse ); } }
// Files keep their absolute path; non-files (directories) get a null path.
@Test public void testSetPath() throws Exception { // TEST : is file FileDialogOperation fileDialogOperation1 = createFileDialogOperation(); FileObject fileObject1 = mock( FileObject.class ); String absoluteFilePath = "/home/someuser/somedir"; when( fileObject1.isFile() ).thenReturn( true ); when( fileObject1.toString() ).thenReturn( absoluteFilePath ); testInstance.setPath( fileDialogOperation1, fileObject1, absoluteFilePath ); assertEquals( absoluteFilePath, fileDialogOperation1.getPath() ); // TEST : is not file FileDialogOperation fileDialogOperation2 = createFileDialogOperation(); FileObject fileObject2 = mock( FileObject.class ); when( fileObject2.isFile() ).thenReturn( false ); testInstance.setPath( fileDialogOperation2, fileObject2, absoluteFilePath ); assertNull( fileDialogOperation2.getPath() ); }
/**
 * Accumulates the serialized-size increment contributed by one key.
 * size[0] grows by the key-table entry cost only when the key is not yet in the table;
 * size[1] always grows by the per-record key-reference cost (index of an existing entry,
 * or the index the key would get when appended).
 */
private static void sizeIncrementOfKey(int[] size, String key, Map<String, Integer> keyTable) { int keyLength = key.getBytes(UTF_8).length; Integer idx = keyTable.get(key); if (idx == null) { size[0] += 1 + VarInt.getLength(keyLength) + keyLength; // encoding of key in key table } size[1] += 1 + VarInt.getLength(idx != null ? idx : keyTable.size()); // key reference in record }
// After a key is added once, adding it again must only cost the reference overhead:
// the delta between first and repeat increments equals the key-table overhead constant.
@Test public void testSizeIncrementOfKey() { int increment = aggregator.sizeIncrement(PARTITION_KEY, null, null); System.out.println(increment); aggregator.addRecord(PARTITION_KEY, null, null); // same partition key is added only once System.out.println(aggregator.sizeIncrement(PARTITION_KEY, null, null)); assertThat(increment - aggregator.sizeIncrement(PARTITION_KEY, null, null)) .isEqualTo(PARTITION_KEY_OVERHEAD); increment = aggregator.sizeIncrement(PARTITION_KEY, HASH_KEY, null); aggregator.addRecord(PARTITION_KEY, HASH_KEY, null); // same hash key is added only once assertThat(increment - aggregator.sizeIncrement(PARTITION_KEY, HASH_KEY, null)) .isEqualTo(HASH_KEY_OVERHEAD); }
/** Returns the parent path, delegating to the shared implementation (null at the root). */
public Path getParent() { return getParentUtil(); }
// getParent() for absolute/relative paths, the root (null parent), and Windows drive roots.
@Test (timeout = 30000) public void testParent() { assertEquals(new Path("/foo"), new Path("/foo/bar").getParent()); assertEquals(new Path("foo"), new Path("foo/bar").getParent()); assertEquals(new Path("/"), new Path("/foo").getParent()); assertEquals(null, new Path("/").getParent()); if (Path.WINDOWS) { assertEquals(new Path("c:/"), new Path("c:/foo").getParent()); } }
/**
 * Evaluates this Allow directive. Allows when the action, entity type and resource all
 * match; additionally, for elastic agent profile requests, allows when the action matches
 * and the enclosing resource (cluster profile) matches. Otherwise skips (defers to other
 * directives).
 */
@Override public Result apply(String action, Class<? extends Validatable> aClass, String resource, String resourceToOperateWithin) { if (matchesAction(action) && matchesType(aClass) && matchesResource(resource)) { return Result.ALLOW; } if (isRequestForElasticAgentProfiles(aClass) && matchesAction(action) && matchesResource(resourceToOperateWithin)) { return Result.ALLOW; } return Result.SKIP; }
// A "view cluster_profile team1_*" directive must allow viewing team1 cluster profiles and
// elastic agent profiles scoped under them, skip everything else, and never allow administer.
@Test void forViewOfWildcardDefinedClusterProfile() { Allow directive = new Allow("view", "cluster_profile", "team1_*"); Result viewAllElasticAgentProfiles = directive.apply("view", ElasticProfile.class, "*", null); Result viewAllElasticAgentProfilesUnderTeam1 = directive.apply("view", ElasticProfile.class, "*", "team1_uat"); Result viewAllElasticAgentProfilesUnderTeam2 = directive.apply("view", ElasticProfile.class, "*", "team2_uat"); Result viewAllClusterProfiles = directive.apply("view", ClusterProfile.class, "*", null); Result viewTeam1ClusterProfile = directive.apply("view", ClusterProfile.class, "team1_uat", null); Result viewTeam2ClusterProfile = directive.apply("view", ClusterProfile.class, "team2_uat", null); Result administerAllElasticAgentProfiles = directive.apply("administer", ElasticProfile.class, "*", null); Result administerAllElasticAgentProfilesUnderTeam1 = directive.apply("administer", ElasticProfile.class, "*", "team1_uat"); Result administerAllElasticAgentProfilesUnderTeam2 = directive.apply("administer", ElasticProfile.class, "*", "team2_uat"); Result administerAllClusterProfiles = directive.apply("administer", ClusterProfile.class, "*", null); Result administerTeam1ClusterProfile = directive.apply("administer", ClusterProfile.class, "team1_uat", null); Result administerTeam2ClusterProfile = directive.apply("administer", ClusterProfile.class, "team2_uat", null); assertThat(viewAllElasticAgentProfiles).isEqualTo(Result.SKIP); assertThat(viewAllElasticAgentProfilesUnderTeam1).isEqualTo(Result.ALLOW); assertThat(viewAllElasticAgentProfilesUnderTeam2).isEqualTo(Result.SKIP); assertThat(viewAllClusterProfiles).isEqualTo(Result.SKIP); assertThat(viewTeam1ClusterProfile).isEqualTo(Result.ALLOW); assertThat(viewTeam2ClusterProfile).isEqualTo(Result.SKIP); assertThat(administerAllElasticAgentProfiles).isEqualTo(Result.SKIP); assertThat(administerAllElasticAgentProfilesUnderTeam1).isEqualTo(Result.SKIP); 
assertThat(administerAllElasticAgentProfilesUnderTeam2).isEqualTo(Result.SKIP); assertThat(administerAllClusterProfiles).isEqualTo(Result.SKIP); assertThat(administerTeam1ClusterProfile).isEqualTo(Result.SKIP); assertThat(administerTeam2ClusterProfile).isEqualTo(Result.SKIP); }
/**
 * Two jobs are equal when they render the same tile with the same alpha flag.
 * Must be kept consistent with hashCode() (defined elsewhere in the class).
 */
@Override public boolean equals(Object obj) { if (this == obj) { return true; } else if (!(obj instanceof Job)) { return false; } Job other = (Job) obj; return this.hasAlpha == other.hasAlpha && this.tile.equals(other.tile); }
// Jobs with the same tile and alpha flag are equal (checked symmetrically via TestUtils);
// jobs with different tiles, and unrelated objects, are not equal.
@Test
public void equalsTest() {
    Job jobA = new Job(new Tile(0, 1, (byte) 2, TILE_SIZE), false);
    Job jobB = new Job(new Tile(0, 1, (byte) 2, TILE_SIZE), false);
    Job differentJob = new Job(new Tile(0, 0, (byte) 0, TILE_SIZE), false);
    TestUtils.equalsTest(jobA, jobB);
    Assert.assertNotEquals(jobA, differentJob);
    Assert.assertNotEquals(differentJob, jobA);
    Assert.assertNotEquals(jobA, new Object());
}
/**
 * Computes the Liquibase checksum for the changeset identified by the first "id" and
 * "author" CLI arguments in migrations.xml, and hands it to the configured consumer.
 */
@Override public void run(Namespace namespace, Liquibase liquibase) throws Exception { final CheckSum checkSum = liquibase.calculateCheckSum("migrations.xml", namespace.<String>getList("id").get(0), namespace.<String>getList("author").get(0)); checkSumConsumer.accept(checkSum); }
// The consumer must receive the expected checksum for changeset id=2/author=db_dev;
// the AtomicBoolean guards against the consumer never being invoked.
@Test void testRun() throws Exception { final AtomicBoolean checkSumVerified = new AtomicBoolean(); migrateCommand.setCheckSumConsumer(checkSum -> { assertThat(checkSum).isEqualTo(CheckSum.parse("9:6e43b880df9a2b41d436026d8a09c457")); checkSumVerified.set(true); }); migrateCommand.run(null, new Namespace(Map.of( "id", List.of("2"), "author", List.of("db_dev"))), MigrationTestSupport.createConfiguration()); assertThat(checkSumVerified).isTrue(); }
/**
 * Returns the next buffer for the current segment, or null when sending is stopped or no
 * queue currently holds the segment. On END_OF_SEGMENT the view stops sending and resets
 * the manager index so the next segment must be located again; the sequence number is
 * advanced for every returned buffer.
 */
@Nullable @Override public BufferAndBacklog getNextBuffer() throws IOException { if (stopSendingData || !findCurrentNettyPayloadQueue()) { return null; } NettyPayloadManager nettyPayloadManager = nettyPayloadManagers.get(managerIndexContainsCurrentSegment); Optional<Buffer> nextBuffer = readNettyPayload(nettyPayloadManager); if (nextBuffer.isPresent()) { stopSendingData = nextBuffer.get().getDataType() == END_OF_SEGMENT; if (stopSendingData) { managerIndexContainsCurrentSegment = -1; } currentSequenceNumber++; return BufferAndBacklog.fromBufferAndLookahead( nextBuffer.get(), getDataType(nettyPayloadManager.peek()), getBacklog(), currentSequenceNumber); } return null; }
// After consuming segment 0, requesting segment 1 must re-trigger availability and yield
// another buffer; a further read returns null once the queue is drained.
@Test void testGetNextBuffer() throws IOException { checkBufferAndBacklog(tieredStorageResultSubpartitionView.getNextBuffer(), 0); tieredStorageResultSubpartitionView.notifyRequiredSegmentId(0, 1); assertThat(availabilityListener).isDone(); checkBufferAndBacklog(tieredStorageResultSubpartitionView.getNextBuffer(), 0); assertThat(tieredStorageResultSubpartitionView.getNextBuffer()).isNull(); }
/**
 * REST endpoint returning a single timeline entity by type and id, optionally restricted
 * to the comma-separated field list. Access violations map to 403, bad arguments to 400,
 * other failures to 500, and a missing entity to 404.
 */
@GET @Path("/{entityType}/{entityId}") @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8 /* , MediaType.APPLICATION_XML */}) public TimelineEntity getEntity( @Context HttpServletRequest req, @Context HttpServletResponse res, @PathParam("entityType") String entityType, @PathParam("entityId") String entityId, @QueryParam("fields") String fields) { init(res); TimelineEntity entity = null; try { entity = timelineDataManager.getEntity( parseStr(entityType), parseStr(entityId), parseFieldsStr(fields, ","), getUser(req)); } catch (YarnException e) { // The user doesn't have the access to override the existing domain. LOG.info(e.getMessage(), e); throw new ForbiddenException(e); } catch (IllegalArgumentException e) { throw new BadRequestException(e); } catch (Exception e) { LOG.error("Error getting entity", e); throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR); } if (entity == null) { throw new NotFoundException("Timeline entity " + new EntityIdentifier(parseStr(entityId), parseStr(entityType)) + " is not found"); } return entity; }
// GET /ws/v1/timeline/type_1/id_1 must return the seeded entity as JSON with all of its
// events, primary filters and other info intact.
@Test void testGetEntity() throws Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("timeline") .path("type_1").path("id_1") .accept(MediaType.APPLICATION_JSON) .get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); TimelineEntity entity = response.getEntity(TimelineEntity.class); assertNotNull(entity); assertEquals("id_1", entity.getEntityId()); assertEquals("type_1", entity.getEntityType()); assertEquals(123L, entity.getStartTime().longValue()); assertEquals(2, entity.getEvents().size()); assertEquals(4, entity.getPrimaryFilters().size()); assertEquals(4, entity.getOtherInfo().size()); }
/**
 * Advice hook tracking current connections: increments the gauge when a channel becomes
 * active and decrements it when it goes inactive; all other intercepted methods are ignored.
 */
@Override public void beforeMethod(final TargetAdviceObject target, final TargetAdviceMethod method, final Object[] args, final String pluginType) { switch (method.getName()) { case "channelActive": MetricsCollectorRegistry.<GaugeMetricsCollector>get(config, pluginType).inc(); break; case "channelInactive": MetricsCollectorRegistry.<GaugeMetricsCollector>get(config, pluginType).dec(); break; default: break; } }
// Two activations and one deactivation must leave the connection gauge at 1.
@Test void assertCountCurrentConnections() { TargetAdviceObjectFixture targetObject = new TargetAdviceObjectFixture(); advice.beforeMethod(targetObject, mockMethod("channelActive"), new Object[]{}, "FIXTURE"); advice.beforeMethod(targetObject, mockMethod("channelActive"), new Object[]{}, "FIXTURE"); advice.beforeMethod(targetObject, mockMethod("channelInactive"), new Object[]{}, "FIXTURE"); assertThat(MetricsCollectorRegistry.get(config, "FIXTURE").toString(), is("1")); }
/**
 * Parses a Lucene query string into a ParsedQuery, collecting both the raw tokens seen by
 * the parser and the terms extracted from the resulting query tree (via the
 * TermCollectingQueryVisitor).
 *
 * @throws ParseException if the query string is not valid Lucene syntax
 */
public ParsedQuery parse(final String query) throws ParseException { final TokenCollectingQueryParser parser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER); parser.setSplitOnWhitespace(true); parser.setAllowLeadingWildcard(allowLeadingWildcard); final Query parsed = parser.parse(query); final ParsedQuery.Builder builder = ParsedQuery.builder().query(query); builder.tokensBuilder().addAll(parser.getTokens()); final TermCollectingQueryVisitor visitor = new TermCollectingQueryVisitor(ANALYZER, parser.getTokenLookup()); parsed.visit(visitor); builder.termsBuilder().addAll(visitor.getParsedTerms()); return builder.build(); }
// Field names must be extracted both from a normal field:value term and from the
// special _exists_: operator (whose value is itself a field name).
@Test void getFieldNamesExist() throws ParseException { final ParsedQuery fields = parser.parse("foo:bar AND _exists_:lorem"); assertThat(fields.allFieldNames()).contains("foo", "lorem"); }
/** Returns the plugin name this handler serves: the REQUEST plugin. */
@Override public String pluginNamed() { return PluginEnum.REQUEST.getName(); }
// The handler must report the canonical "request" plugin name.
@Test public void testPluginNamed() { assertEquals(this.requestPluginHandler.pluginNamed(), "request"); }
/**
 * Decodes a URL query string into a name -> values map using the given charset.
 * Convenience overload delegating with strict-mode disabled (third argument false).
 */
public static Map<String, List<String>> decodeParams(String paramsStr, String charset) { return decodeParams(paramsStr, charset, false); }
// Plain values pass through and percent-encoded values are decoded (UTF-8).
@Test public void decodeParamsTest() { final String paramsStr = "uuuu=0&a=b&c=%3F%23%40!%24%25%5E%26%3Ddsssss555555"; final Map<String, List<String>> map = HttpUtil.decodeParams(paramsStr, CharsetUtil.UTF_8); assertEquals("0", map.get("uuuu").get(0)); assertEquals("b", map.get("a").get(0)); assertEquals("?#@!$%^&=dsssss555555", map.get("c").get(0)); }
/** Returns the backing identifier value used as this intent ID's fingerprint. */
public long fingerprint() { return identifier; }
// The explicit-value constructor stores its argument; the no-arg constructor yields 0.
@Test public void testConstruction() { final IntentId id1 = new IntentId(987654321L); assertEquals(id1.fingerprint(), 987654321L); final IntentId emptyId = new IntentId(); assertEquals(emptyId.fingerprint(), 0L); }
/**
 * Stop-gain rule: satisfied only while a position is open, comparing the entry net price
 * against the close price at the given index — buy entries use the buy-gain check, sell
 * entries the sell-gain check. With no trading record or no open position, never satisfied.
 */
@Override public boolean isSatisfied(int index, TradingRecord tradingRecord) { boolean satisfied = false; // No trading history or no position opened, no loss if (tradingRecord != null) { Position currentPosition = tradingRecord.getCurrentPosition(); if (currentPosition.isOpened()) { Num entryPrice = currentPosition.getEntry().getNetPrice(); Num currentPrice = closePrice.getValue(index); if (currentPosition.getEntry().isBuy()) { satisfied = isBuyGainSatisfied(entryPrice, currentPrice); } else { satisfied = isSellGainSatisfied(entryPrice, currentPrice); } } } traceIsSatisfied(index, satisfied); return satisfied; }
// A 30% stop-gain on buy positions: unsatisfied with no record/closed position, and flips
// to satisfied once the close price exceeds the entry by 30%, for two successive entries.
@Test public void isSatisfiedWorksForBuy() { final TradingRecord tradingRecord = new BaseTradingRecord(Trade.TradeType.BUY); final Num tradedAmount = numOf(1); // 30% stop-gain StopGainRule rule = new StopGainRule(closePrice, numOf(30)); assertFalse(rule.isSatisfied(0, null)); assertFalse(rule.isSatisfied(1, tradingRecord)); // Enter at 108 tradingRecord.enter(2, numOf(108), tradedAmount); assertFalse(rule.isSatisfied(2, tradingRecord)); assertFalse(rule.isSatisfied(3, tradingRecord)); assertTrue(rule.isSatisfied(4, tradingRecord)); // Exit tradingRecord.exit(5); // Enter at 118 tradingRecord.enter(5, numOf(118), tradedAmount); assertFalse(rule.isSatisfied(5, tradingRecord)); assertTrue(rule.isSatisfied(6, tradingRecord)); assertTrue(rule.isSatisfied(7, tradingRecord)); }
/** Returns the current availability zone as reported by the GCP metadata API. */
String getAvailabilityZone() { return gcpMetadataApi.currentZone(); }
// The client must pass through the zone reported by the metadata API unchanged.
@Test public void getAvailabilityZone() { // given given(gcpMetadataApi.currentZone()).willReturn(ZONE_1); GcpConfig gcpConfig = GcpConfig.builder().build(); GcpClient gcpClient = new GcpClient(gcpMetadataApi, gcpComputeApi, gcpAuthenticator, gcpConfig); // when String result = gcpClient.getAvailabilityZone(); // then assertEquals(ZONE_1, result); }
/**
 * Builds the .sigzip download-signature resource for an extension file: a zip containing
 * the detached signature (.signature.sig), the signature manifest (.signature.manifest),
 * and an empty .signature.p7s placeholder that VS Code checks for. IO failures are wrapped
 * in ErrorResultException.
 */
public FileResource generateSignature(FileResource download, TempFile extensionFile, SignatureKeyPair keyPair) { var resource = new FileResource(); resource.setExtension(download.getExtension()); resource.setName(NamingUtil.toFileFormat(download.getExtension(), ".sigzip")); resource.setType(FileResource.DOWNLOAD_SIG); try (var out = new ByteArrayOutputStream()) { try (var zip = new ZipOutputStream(out)) { var sigEntry = new ZipEntry(".signature.sig"); zip.putNextEntry(sigEntry); zip.write(generateSignature(extensionFile, keyPair)); zip.closeEntry(); var manifestEntry = new ZipEntry(".signature.manifest"); zip.putNextEntry(manifestEntry); zip.write(generateSignatureManifest(extensionFile)); zip.closeEntry(); // Add dummy file to the archive because VS Code checks if it exists var dummyEntry = new ZipEntry(".signature.p7s"); zip.putNextEntry(dummyEntry); zip.write(new byte[0]); zip.closeEntry(); } resource.setContent(out.toByteArray()); } catch (IOException e) { throw new ErrorResultException("Failed to sign extension file", e); } return resource; }
// Signs a real .vsix fixture and compares the produced sigzip against a reference archive:
// manifests must match byte-for-byte and the signature entry must be present and non-empty
// (signatures themselves are key-dependent, so only their existence is asserted).
@Test public void testGenerateSignature() throws IOException { var keyPair = keyPairService.generateKeyPair(); var namespace = new Namespace(); namespace.setName("foo"); var extension = new Extension(); extension.setName("bar"); extension.setNamespace(namespace); var extVersion = new ExtensionVersion(); extVersion.setVersion("1.0.0"); extVersion.setTargetPlatform("universal"); extVersion.setExtension(extension); var download = new FileResource(); download.setExtension(extVersion); var sigzipContent = new byte[0]; try ( var stream = getClass().getResource("ms-python.python-2024.7.11511013.vsix").openStream(); var extensionFile = new TempFile("ms-python", ".vsix"); var out = Files.newOutputStream(extensionFile.getPath()) ) { stream.transferTo(out); var signature = integrityService.generateSignature(download, extensionFile, keyPair); sigzipContent = signature.getContent(); } try(var temp = new TempFile("ms-python", ".sigzip")) { Files.write(temp.getPath(), sigzipContent); try ( var sigzip = new ZipFile(temp.getPath().toFile()); var expectedSigZip = new ZipFile(getClass().getResource("ms-python.python-2024.7.11511013.sigzip").getPath()) ) { expectedSigZip.stream() .forEach(expectedEntry -> { var entry = sigzip.getEntry(expectedEntry.getName()); assertNotNull(entry); if(expectedEntry.getName().equals(".signature.manifest")) { assertEquals( new String(ArchiveUtil.readEntry(expectedSigZip, expectedEntry, ObservationRegistry.NOOP)), new String(ArchiveUtil.readEntry(sigzip, entry, ObservationRegistry.NOOP)) ); } }); var entry = sigzip.getEntry(".signature.sig"); assertNotNull(entry); assertTrue(ArchiveUtil.readEntry(sigzip, entry, ObservationRegistry.NOOP).length > 0); } } }
/**
 * Converts a Kafka {@code ResourcePattern} into a {@code SimpleAclRuleResource}.
 *
 * TOPIC, GROUP and TRANSACTIONAL_ID resources keep their name and map their pattern type;
 * CLUSTER resources always use the fixed literal name "kafka-cluster".
 *
 * @param kafkaResourcePattern the Kafka resource pattern to convert
 * @return the equivalent SimpleAclRuleResource
 * @throws IllegalArgumentException for unsupported resource or pattern types
 */
public static SimpleAclRuleResource fromKafkaResourcePattern(ResourcePattern kafkaResourcePattern) {
    switch (kafkaResourcePattern.resourceType()) {
        case TOPIC:
            return new SimpleAclRuleResource(kafkaResourcePattern.name(),
                    SimpleAclRuleResourceType.TOPIC, toAclResourcePatternType(kafkaResourcePattern));
        case GROUP:
            return new SimpleAclRuleResource(kafkaResourcePattern.name(),
                    SimpleAclRuleResourceType.GROUP, toAclResourcePatternType(kafkaResourcePattern));
        case CLUSTER:
            // Cluster ACLs always target the single literal "kafka-cluster" resource
            return new SimpleAclRuleResource("kafka-cluster",
                    SimpleAclRuleResourceType.CLUSTER, AclResourcePatternType.LITERAL);
        case TRANSACTIONAL_ID:
            return new SimpleAclRuleResource(kafkaResourcePattern.name(),
                    SimpleAclRuleResourceType.TRANSACTIONAL_ID, toAclResourcePatternType(kafkaResourcePattern));
        default:
            throw new IllegalArgumentException("Invalid Resource type: " + kafkaResourcePattern.resourceType());
    }
}

/**
 * Maps the Kafka pattern type (LITERAL / PREFIXED) to the corresponding
 * AclResourcePatternType. This mapping was previously duplicated verbatim for the
 * TOPIC, GROUP and TRANSACTIONAL_ID branches.
 *
 * @throws IllegalArgumentException for any other pattern type (e.g. MATCH, ANY)
 */
private static AclResourcePatternType toAclResourcePatternType(ResourcePattern kafkaResourcePattern) {
    switch (kafkaResourcePattern.patternType()) {
        case LITERAL:
            return AclResourcePatternType.LITERAL;
        case PREFIXED:
            return AclResourcePatternType.PREFIX;
        default:
            // Fix: the old message misleadingly reported the *resource* type here
            throw new IllegalArgumentException("Invalid pattern type: " + kafkaResourcePattern.patternType());
    }
}
// GROUP resources: LITERAL patterns keep the full name with LITERAL type, and PREFIXED
// patterns keep the prefix with PREFIX type.
@Test public void testFromKafkaResourcePatternWithGroupResource() { // Regular group ResourcePattern kafkaGroupResourcePattern = new ResourcePattern(ResourceType.GROUP, "my-group", PatternType.LITERAL); SimpleAclRuleResource expectedGroupResourceRules = new SimpleAclRuleResource("my-group", SimpleAclRuleResourceType.GROUP, AclResourcePatternType.LITERAL); assertThat(SimpleAclRuleResource.fromKafkaResourcePattern(kafkaGroupResourcePattern), is(expectedGroupResourceRules)); // Prefixed group kafkaGroupResourcePattern = new ResourcePattern(ResourceType.GROUP, "my-", PatternType.PREFIXED); expectedGroupResourceRules = new SimpleAclRuleResource("my-", SimpleAclRuleResourceType.GROUP, AclResourcePatternType.PREFIX); assertThat(SimpleAclRuleResource.fromKafkaResourcePattern(kafkaGroupResourcePattern), is(expectedGroupResourceRules)); }