focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Strips enclosing square brackets from an IPv6 address literal (e.g. "[::1]" -> "::1").
 * Returns the input unchanged when it is not bracket-enclosed, or when the bracketed
 * content is not a parseable IPv6 address.
 *
 * @param address candidate address, possibly bracket-enclosed; never null
 * @return the de-bracketed IPv6 address, or the original input
 */
@Nonnull public static String removeBracketsFromIpv6Address(@Nonnull final String address) { final String result; if (address.startsWith("[") && address.endsWith("]")) { result = address.substring(1, address.length()-1); try { Ipv6.parse(result); // The remainder is a valid IPv6 address. Return the unwrapped (bracket-free) value. return result; } catch (IllegalArgumentException e) { // The remainder isn't a valid IPv6 address. Return the original value. return address; } } // Not a bracket-enclosed string. Return the original input. return address; }
// Verifies that an IPv6 address without brackets is returned unchanged.
@Test public void stripBracketsIpv6NoBrackets() throws Exception { // Setup test fixture. final String input = "0:0:0:0:0:0:0:1"; // Execute system under test. final String result = AuthCheckFilter.removeBracketsFromIpv6Address(input); // Verify result. assertEquals(input, result); }
/**
 * Entry point of the builder chain: starts building the operator over the given input.
 * The {@code null} name delegates to implicit (absent) operator naming.
 *
 * @param input the input {@link PCollection}
 * @return builder expecting the key extractor
 */
public static <InputT> KeyByBuilder<InputT> of(PCollection<InputT> input) { return new Builder<>(null).of(input); }
// Verifies that an operator built without an explicit name reports an absent name.
@Test public void testBuild_ImplicitName() { final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings()); final PCollection<KV<String, Long>> counted = CountByKey.of(dataset).keyBy(s -> s).output(); final CountByKey count = (CountByKey) TestUtils.getProducer(counted); assertFalse(count.getName().isPresent()); }
/**
 * Deserializes JSON bytes from a Kafka topic into the target type.
 * Null bytes map to null (tombstone). Schema-Registry-framed JSON is unwrapped via
 * JsonSerdeUtils; plain JSON goes through MAPPER directly because the standard
 * JsonSchemaConverter cannot enable USE_BIG_DECIMAL_FOR_FLOATS. The parsed tree is
 * coerced to the connect schema, then cast to the target Java type. On failure the
 * JsonParseException location is cleared first so raw payload data is not logged.
 *
 * @throws SerializationException wrapping any parse/coercion failure
 */
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
// Verifies that deserializing a non-Base64 JSON string into a BYTES column fails
// with a SerializationException carrying a descriptive message.
@Test public void shouldThrowOnInvalidBytes() { // Given: final KsqlJsonDeserializer<ByteBuffer> deserializer = givenDeserializerForSchema(Schema.OPTIONAL_BYTES_SCHEMA, ByteBuffer.class); final byte[] bytes = serializeJson("abc"); // When: final Exception e = assertThrows(SerializationException.class, () -> deserializer.deserialize(SOME_TOPIC, bytes)); // Then: assertThat(e.getMessage(), containsString("Value is not a valid Base64 encoded string: abc")); }
/**
 * Trusts a certificate chain iff some certificate in it has a SHA-256 fingerprint in the
 * trusted set, that certificate is currently valid, and every link below the match
 * (down to the leaf) verifies and validates against its signer.
 * Returns false for empty chains or an empty trusted-fingerprint set.
 *
 * @throws CertificateException if a matched or chained certificate fails validity checks
 */
@Override public boolean isTrusted(X509Certificate[] chain, String authType) throws CertificateException { if (chain.length == 0 || this.trustedFingerprints.isEmpty()) { return false; } final MessageDigest sha256Digest = sha256(); // traverse up the chain until we find one whose fingerprint matches for (int i = 0; i < chain.length; i++) { final X509Certificate currentCandidate = chain[i]; final byte[] derEncoding = currentCandidate.getEncoded(); Fingerprint candidateFingerprint = new Fingerprint(sha256Digest.digest(derEncoding)); if (this.trustedFingerprints.contains(candidateFingerprint)) { final Date currentDate = dateSupplier.get(); currentCandidate.checkValidity(currentDate); // zip back down the chain and make sure everything is valid for(; i > 0; i--) { final X509Certificate signer = chain[i]; final X509Certificate signed = chain[i-1]; verifyAndValidate(signed, signer, currentDate); } return true; } } return false; }
// Verifies that the chain is trusted when the trusted fingerprint belongs to the root
// certificate (last element), exercising the full walk back down the chain.
@Test public void testIsTrustedWhenAMatchingValidCertificateIsRootOfTheChain() throws CertificateException { final CATrustedFingerprintTrustStrategy trustStrategy = new CATrustedFingerprintTrustStrategy(FINGERPRINT_ROOT, ()-> DATE_CERTS_VALID); final X509Certificate[] chain = {CERTIFICATE_SERVER, CERTIFICATE_INTERMEDIATE, CERTIFICATE_ROOT}; assertTrue(trustStrategy.isTrusted(chain, "noop")); }
/**
 * Serializes the object as XML to the given stream.
 * Pure delegation kept only for binary compatibility.
 *
 * @deprecated use the superclass method directly
 */
@Deprecated @Override public void toXML(Object obj, OutputStream out) { super.toXML(obj, out); }
// Regression test for JENKINS-5769: a serialized Throwable containing an unknown
// ("missingField") element must still unmarshal instead of failing. Logging is
// temporarily silenced so the expected conversion warnings don't pollute output.
@Issue("JENKINS-5769") @Test public void unmarshalThrowableMissingField() { Level oldLevel = disableLogging(); Baz baz = new Baz(); baz.myFailure = new Exception("foo"); XStream2 xs = new XStream2(); String xml = xs.toXML(baz); baz = (Baz) xs.fromXML(xml); assertEquals("foo", baz.myFailure.getMessage()); baz = (Baz) xs.fromXML("<hudson.util.XStream2Test_-Baz><myFailure>" + "<missingField>true</missingField>" + "<detailMessage>hoho</detailMessage>" + "<stackTrace><trace>" + "hudson.util.XStream2Test.testUnmarshalThrowableMissingField(XStream2Test.java:97)" + "</trace></stackTrace>" + "</myFailure></hudson.util.XStream2Test_-Baz>"); // Object should load, despite "missingField" in XML above assertEquals("hoho", baz.myFailure.getMessage()); enableLogging(oldLevel); }
/**
 * Resolves a type name (case-insensitive) to a SqlPrimitiveType.
 * "INT" and "VARCHAR" are aliases for INTEGER and STRING; anything else must match a
 * SqlBaseType enum constant.
 *
 * @throws SchemaException if the name is not a known primitive type
 */
public static SqlPrimitiveType of(final String typeName) { switch (typeName.toUpperCase()) { case INT: return SqlPrimitiveType.of(SqlBaseType.INTEGER); case VARCHAR: return SqlPrimitiveType.of(SqlBaseType.STRING); default: try { final SqlBaseType sqlType = SqlBaseType.valueOf(typeName.toUpperCase()); return SqlPrimitiveType.of(sqlType); } catch (final IllegalArgumentException e) { throw new SchemaException("Unknown primitive type: " + typeName, e); } } }
// Verifies that every documented primitive type name (including aliases) is recognised
// by isPrimitiveTypeName — collects the unrecognised ones and asserts none remain.
@Test public void shouldSupportAllPrimitiveTypeNames() { // Given: final Set<String> typeNames = ImmutableSet.of( "INT", "VARCHAR", "BOOLEAN", "BIGINT", "DOUBLE", "STRING", "TIMESTAMP" ); // When: final List<Boolean> missing = typeNames.stream() .map(SqlPrimitiveType::isPrimitiveTypeName) .filter(x -> !x) .collect(Collectors.toList()); // Then: assertThat(missing, is(empty())); }
/**
 * Validates that a dict-data row exists for the given id.
 * A null id is treated as "nothing to validate" and passes silently.
 *
 * @throws the service exception DICT_DATA_NOT_EXISTS when no row matches the id
 */
@VisibleForTesting public void validateDictDataExists(Long id) { if (id == null) { return; } DictDataDO dictData = dictDataMapper.selectById(id); if (dictData == null) { throw exception(DICT_DATA_NOT_EXISTS); } }
// Verifies that validating a random, non-existent id raises DICT_DATA_NOT_EXISTS.
@Test public void testValidateDictDataExists_notExists() { assertServiceException(() -> dictDataService.validateDictDataExists(randomLongId()), DICT_DATA_NOT_EXISTS); }
/**
 * UDF: returns a new list with every element equal to {@code victim} removed.
 * Null input propagates as null (SQL semantics); null victims are removable because
 * comparison uses Objects.equals.
 *
 * @param array the source list, possibly null
 * @param victim the value to strip out
 * @return a filtered copy, or null when the input list is null
 */
@Udf public <T> List<T> remove( @UdfParameter(description = "Array of values") final List<T> array, @UdfParameter(description = "Value to remove") final T victim) { if (array == null) { return null; } return array.stream() .filter(el -> !Objects.equals(el, victim)) .collect(Collectors.toList()); }
// Verifies SQL null propagation: removing from a null array yields null.
@Test public void shouldReturnNullForNullInputArray() { final List<String> input1 = null; final String input2 = "foo"; final List<String> result = udf.remove(input1, input2); assertThat(result, is(nullValue())); }
/**
 * Demo entry point: writes a line through a custom FileWriterAction callback,
 * then reads the file back and logs its contents.
 * NOTE(review): writes "testfile.txt" into the current working directory — TODO confirm
 * that is intended for the example.
 *
 * @throws IOException if writing or reading the file fails
 */
public static void main(String[] args) throws IOException { // create the file writer and execute the custom action FileWriterAction writeHello = writer -> writer.write("Gandalf was here"); new SimpleFileWriter("testfile.txt", writeHello); // print the file contents try (var scanner = new Scanner(new File("testfile.txt"))) { while (scanner.hasNextLine()) { LOGGER.info(scanner.nextLine()); } } }
// Smoke test: the example application must run end-to-end without throwing.
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
/**
 * Returns the API-doc menu tree plus the enabled environment configs.
 * Env configs come from the API_DOC_ENV dictionary (disabled entries filtered out);
 * the first enabled env's address doubles as the default gateway URL.
 *
 * @return success result wrapping a DocVO
 */
@GetMapping("/getDocMenus") public ShenyuAdminResult getAllDoc() { Collection<DocInfo> docInfos = docManager.listAll(); List<MenuProjectVO> menuProjectList = docInfos.stream() .map(getMenuAndDocInfo()) .collect(Collectors.toList()); DocVO docVO = new DocVO(); docVO.setMenuProjects(menuProjectList); List<ShenyuDictVO> dictVOList = shenyuDictService.list(AdminConstants.DICT_TYPE_API_DOC_ENV); List<DocVO.EnvConfig> envConfigs = dictVOList.stream() .filter(ShenyuDictVO::getEnabled) .map(dictVO -> { DocVO.EnvConfig envConfig = new DocVO.EnvConfig(); envConfig.setEnvLabel(dictVO.getDictName()); envConfig.setAddressUrl(dictVO.getDictValue()); envConfig.setEnvDesc(dictVO.getDesc()); return envConfig; }) .collect(Collectors.toList()); docVO.setEnvProps(envConfigs); if (CollectionUtils.isNotEmpty(envConfigs)) { docVO.setGatewayUrl(envConfigs.get(0).getAddressUrl()); } return ShenyuAdminResult.success(docVO); }
// Smoke test: GET /apidoc/getDocMenus returns 200 even with an empty env dictionary.
@Test public void testGetAllDoc() throws Exception { List<ShenyuDictVO> shenyuDictVOS = new ArrayList<>(); given(this.shenyuDictService.list(any())).willReturn(shenyuDictVOS); this.mockMvc.perform(MockMvcRequestBuilders.get("/apidoc/getDocMenus")) .andExpect(status().isOk()) .andReturn(); }
/**
 * Called for each replayed metadata log delta; decides whether to schedule a snapshot.
 * Bytes-based criterion takes precedence over the time-based one; either only fires
 * when the emitter's event queue is empty (so snapshots don't pile up). A zero
 * maxTimeSinceLastSnapshotNs disables the time-based trigger.
 */
void publishLogDelta( MetadataDelta delta, MetadataImage newImage, LogDeltaManifest manifest ) { bytesSinceLastSnapshot += manifest.numBytes(); if (bytesSinceLastSnapshot >= maxBytesSinceLastSnapshot) { if (eventQueue.isEmpty()) { scheduleEmit("we have replayed at least " + maxBytesSinceLastSnapshot + " bytes", newImage); } else if (log.isTraceEnabled()) { log.trace("Not scheduling bytes-based snapshot because event queue is not empty yet."); } } else if (maxTimeSinceLastSnapshotNs != 0 && (time.nanoseconds() - lastSnapshotTimeNs >= maxTimeSinceLastSnapshotNs)) { if (eventQueue.isEmpty()) { scheduleEmit("we have waited at least " + TimeUnit.NANOSECONDS.toMinutes(maxTimeSinceLastSnapshotNs) + " minute(s)", newImage); } else if (log.isTraceEnabled()) { log.trace("Not scheduling time-based snapshot because event queue is not empty yet."); } } else if (log.isTraceEnabled()) { log.trace("Neither time-based nor bytes-based criteria are met; not scheduling snapshot."); } }
// Verifies that an emitter failure produces no images and surfaces through the fault
// handler as a wrapped FaultHandlerException with the original message.
@Test public void testEmitterProblem() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("SnapshotGenerator"); MockEmitter emitter = new MockEmitter().setProblem(new RuntimeException("oops")); try (SnapshotGenerator generator = new SnapshotGenerator.Builder(emitter). setFaultHandler(faultHandler). setMaxBytesSinceLastSnapshot(200). build()) { for (int i = 0; i < 2; i++) { generator.publishLogDelta(TEST_DELTA, TEST_IMAGE, logDeltaManifestBuilder().elapsedNs(10000).numBytes(50000).build()); } } assertEquals(Collections.emptyList(), emitter.images()); assertNotNull(faultHandler.firstException()); assertEquals(FaultHandlerException.class, faultHandler.firstException().getClass()); assertEquals("SnapshotGenerator: KRaft snapshot file generation error: oops", faultHandler.firstException().getMessage()); }
/**
 * Returns extension classes for the given plugin, but only when the caller asks about
 * its own plugin id; any foreign id is rejected.
 *
 * @throws IllegalAccessError when {@code pluginId} is not the wrapped plugin's own id
 */
@Override
public List<Class<?>> getExtensionClasses(String pluginId) {
    // Guard first: a sandboxed plugin may never inspect another plugin's extensions.
    if (!currentPluginId.equals(pluginId)) {
        throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId
                + " tried to execute getExtensionClasses for foreign pluginId!");
    }
    return original.getExtensionClasses(pluginId);
}
// Verifies the sandbox wrapper: own-plugin lookups succeed (by type and by id) while
// foreign-plugin lookups throw IllegalAccessError.
@Test public void getExtensionClasses() { pluginManager.loadPlugins(); pluginManager.startPlugins(); assertEquals(1, wrappedPluginManager.getExtensionClasses(TestExtensionPoint.class).size()); assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.getExtensionClasses(TestExtensionPoint.class, OTHER_PLUGIN_ID)); assertEquals(1, wrappedPluginManager.getExtensionClasses(TestExtensionPoint.class, THIS_PLUGIN_ID).size()); assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.getExtensionClasses(OTHER_PLUGIN_ID)); assertEquals(1, wrappedPluginManager.getExtensionClasses(THIS_PLUGIN_ID).size()); }
/**
 * Lists the child node names of a ZooKeeper path.
 * Any Curator failure is rethrown wrapped in the project's ShenyuException.
 *
 * @param key the ZooKeeper path
 * @return child node names (possibly empty)
 */
public List<String> getChildren(final String key) { try { return client.getChildren().forPath(key); } catch (Exception e) { throw new ShenyuException(e); } }
// Verifies both paths: an unstubbed client wraps failures in ShenyuException, and a
// stubbed builder returns the (empty) child list unchanged.
@Test void getChildren() throws Exception { assertThrows(ShenyuException.class, () -> client.getChildren("/test")); GetChildrenBuilder getChildrenBuilder = mock(GetChildrenBuilder.class); when(curatorFramework.getChildren()).thenReturn(getChildrenBuilder); when(getChildrenBuilder.forPath(anyString())).thenReturn(new ArrayList<>()); List<String> children = client.getChildren("/test"); assertEquals(0, children.size()); }
/**
 * Removes a config entry, delegating to the inner implementation with the service's
 * namespace and no tag.
 *
 * @return true if the removal succeeded
 * @throws NacosException on server/communication failure
 */
@Override public boolean removeConfig(String dataId, String group) throws NacosException { return removeConfigInner(namespace, dataId, group, null); }
// Verifies that removeConfig delegates to the worker exactly once with an empty tenant
// and a null tag, and propagates the worker's boolean result.
@Test void testRemoveConfig() throws NacosException { String dataId = "1"; String group = "2"; String tenant = ""; Mockito.when(mockWoker.removeConfig(dataId, group, tenant, null)).thenReturn(true); final boolean b = nacosConfigService.removeConfig(dataId, group); assertTrue(b); Mockito.verify(mockWoker, Mockito.times(1)).removeConfig(dataId, group, tenant, null); }
/**
 * Static factory for a FlagSet over the given enum class.
 *
 * @param enumClass enum type backing the set
 * @param prefix configuration-key prefix associated with the flags
 * @param flags initial membership
 * @return a new FlagSet instance
 */
public static <E extends Enum<E>> FlagSet<E> createFlagSet( final Class<E> enumClass, final String prefix, final EnumSet<E> flags) { return new FlagSet<>(enumClass, prefix, flags); }
// Verifies that FlagSets over different enum classes are never equal, even when both
// share the same prefix.
@Test public void testClassInequality() { final FlagSet<?> s1 = createFlagSet(SimpleEnum.class, KEYDOT, noneOf(SimpleEnum.class)); final FlagSet<?> s2 = createFlagSet(OtherEnum.class, KEYDOT, OtherEnum.a); Assertions.assertThat(s1) .describedAs("s1 == s2") .isNotEqualTo(s2); }
/**
 * Generates (or amends) the use-default-insert-columns token for an INSERT statement.
 * When a previous token already exists it is mutated in place and reused; otherwise a
 * fresh token is built. A missing table resolves to an empty table name.
 */
@Override public UseDefaultInsertColumnsToken generateSQLToken(final InsertStatementContext insertStatementContext) { String tableName = Optional.ofNullable(insertStatementContext.getSqlStatement().getTable()).map(optional -> optional.getTableName().getIdentifier().getValue()).orElse(""); Optional<UseDefaultInsertColumnsToken> previousSQLToken = findInsertColumnsToken(); if (previousSQLToken.isPresent()) { processPreviousSQLToken(previousSQLToken.get(), insertStatementContext, tableName); return previousSQLToken.get(); } return generateNewSQLToken(insertStatementContext, tableName); }
// Verifies the fresh-token path: with no previous tokens, the generated token lists
// the original columns plus the encrypt-derived cipher/assist/like columns.
@Test void assertGenerateSQLTokenFromGenerateNewSQLToken() { generator.setPreviousSQLTokens(Collections.emptyList()); assertThat(generator.generateSQLToken(EncryptGeneratorFixtureBuilder.createInsertStatementContext(Collections.emptyList())).toString(), is("(id, name, status, pwd_cipher, pwd_assist, pwd_like)")); }
/**
 * Reads the configured working directory, if any, as an absolute Unix path.
 * Note: the exception is constructed with the raw path passed twice — once as the
 * message and once as the invalid value — which matching tests rely on.
 *
 * @return the parsed path, or empty when no working directory is configured
 * @throws InvalidWorkingDirectoryException when the value is not an absolute Unix path
 */
@VisibleForTesting static Optional<AbsoluteUnixPath> getWorkingDirectoryChecked(RawConfiguration rawConfiguration) throws InvalidWorkingDirectoryException { Optional<String> directory = rawConfiguration.getWorkingDirectory(); if (!directory.isPresent()) { return Optional.empty(); } String path = directory.get(); try { return Optional.of(AbsoluteUnixPath.get(path)); } catch (IllegalArgumentException ex) { throw new InvalidWorkingDirectoryException(path, path, ex); } }
// Verifies that a relative working directory is rejected, with both the exception
// message and the recorded invalid value equal to the raw path.
@Test public void testGetWorkingDirectoryChecked_notAbsolute() { when(rawConfiguration.getWorkingDirectory()).thenReturn(Optional.of("relative/path")); InvalidWorkingDirectoryException exception = assertThrows( InvalidWorkingDirectoryException.class, () -> PluginConfigurationProcessor.getWorkingDirectoryChecked(rawConfiguration)); assertThat(exception).hasMessageThat().isEqualTo("relative/path"); assertThat(exception.getInvalidPathValue()).isEqualTo("relative/path"); }
/**
 * Opens a multipart upload stream for the file: starts the upload session to get the
 * target location, then wraps a MultipartOutputStream in a memory-segmenting stream
 * sized by the configured chunk size. getStatus() exposes the uploaded File result.
 *
 * @throws BackgroundException if starting the upload session fails
 */
@Override public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final String location = new StoregateWriteFeature(session, fileid).start(file, status); final MultipartOutputStream proxy = new MultipartOutputStream(location, file, status); return new HttpResponseOutputStream<File>(new MemorySegementingOutputStream(proxy, new HostPreferences(session.getHost()).getInteger("storegate.upload.multipart.chunksize")), new StoregateAttributesFinderFeature(session, fileid), status) { @Override public File getStatus() { return proxy.getResult(); } }; }
// Integration test: writing to a locked file must fail until the transfer status
// carries the lock id, after which the multipart upload completes and the file is
// unlocked and cleaned up.
@Test public void testWriteWithLock() throws Exception { final StoregateIdProvider nodeid = new StoregateIdProvider(session); final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir( new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final byte[] content = RandomUtils.nextBytes(32769); final Path test = new StoregateTouchFeature(session, nodeid).touch( new Path(room, String.format("%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus()); final String lockId = new StoregateLockFeature(session, nodeid).lock(test); final TransferStatus status = new TransferStatus().withLength(-1L); final StoregateMultipartWriteFeature writer = new StoregateMultipartWriteFeature(session, nodeid); try { final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback()); fail(); } catch(LockedException e) { // expected: the file is locked and no lock id was supplied } status.setLockId(lockId); final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); out.close(); new StoregateLockFeature(session, nodeid).unlock(test, lockId); new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Handles HTTP Basic authentication. When the Authorization header is absent or not a
 * Basic scheme, challenges the client with 401 + WWW-Authenticate and returns null.
 * Otherwise decodes the UTF-8 credentials (per RFC 7617) and authenticates them.
 * NOTE(review): if the decoded credentials lack a ':' separator this returns null
 * without setting any status/challenge — confirm that fall-through is intentional.
 *
 * @return the authenticated token, or null when authentication did not complete
 */
@Override public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response) throws IOException, AuthenticationException { AuthenticationToken token = null; String authorization = request.getHeader(HttpConstants.AUTHORIZATION_HEADER); if (authorization == null || !AuthenticationHandlerUtil.matchAuthScheme(HttpConstants.BASIC, authorization)) { response.setHeader(WWW_AUTHENTICATE, HttpConstants.BASIC); response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); if (authorization == null) { logger.trace("Basic auth starting"); } else { logger.warn("'" + HttpConstants.AUTHORIZATION_HEADER + "' does not start with '" + HttpConstants.BASIC + "' : {}", authorization); } } else { authorization = authorization.substring(HttpConstants.BASIC.length()).trim(); final Base64 base64 = new Base64(0); // As per RFC7617, UTF-8 charset should be used for decoding. String[] credentials = new String(base64.decode(authorization), StandardCharsets.UTF_8).split(":", 2); if (credentials.length == 2) { token = authenticateUser(credentials[0], credentials[1]); response.setStatus(HttpServletResponse.SC_OK); } } return token; }
// Verifies that a syntactically valid Basic header with bad credentials yields a null
// token and a 401 challenge.
@Test(timeout = 60000) public void testRequestWithInvalidAuthorization() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); final Base64 base64 = new Base64(0); String credentials = "bjones:invalidpassword"; Mockito.when(request.getHeader(HttpConstants.AUTHORIZATION_HEADER)) .thenReturn(base64.encodeToString(credentials.getBytes())); Assert.assertNull(handler.authenticate(request, response)); Mockito.verify(response).setHeader(WWW_AUTHENTICATE, HttpConstants.BASIC); Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED); }
/**
 * Aggregates the migration status from per-index statuses:
 * NOT_STARTED when there are no indices or none has started; when every index is
 * completed, ERROR if any errored else FINISHED; otherwise RUNNING.
 */
@JsonProperty("status") public Status status() { if (indices.isEmpty() || indices.stream().allMatch(i -> i.status() == Status.NOT_STARTED)) { return Status.NOT_STARTED; } else if (indices.stream().allMatch(RemoteReindexIndex::isCompleted)) { // all are now completed, either finished or errored if (indices.stream().anyMatch(i -> i.status() == Status.ERROR)) { return Status.ERROR; } else { return Status.FINISHED; } } else { return Status.RUNNING; } }
// Verifies that an errored index does not flip the aggregate to ERROR while another
// index is still running — the migration stays RUNNING until all complete.
@Test void testStatusRunningWithError() { final RemoteReindexMigration migration = withIndices( index("one", RemoteReindexingMigrationAdapter.Status.FINISHED), index("two", RemoteReindexingMigrationAdapter.Status.ERROR), index("three", RemoteReindexingMigrationAdapter.Status.RUNNING) ); Assertions.assertThat(migration.status()).isEqualTo(RemoteReindexingMigrationAdapter.Status.RUNNING); }
/**
 * Synchronous read: blocks on the async counterpart and returns the current value.
 */
@Override public long get() { return get(getAsync()); }
// Verifies that a never-written atomic long reads as zero.
@Test public void testGetZero() { RAtomicLong ad2 = redisson.getAtomicLong("test"); assertThat(ad2.get()).isZero(); }
/**
 * Renders the full HTML report: standard header, core report body, then footer.
 *
 * @throws IOException if writing to the underlying writer fails
 */
@Override void toHtml() throws IOException { writeHtmlHeader(); htmlCoreReport.toHtml(); writeHtmlFooter(); }
// Covers three report configurations: a null counterReportsByCounterName map, an empty
// in-flight request list, and more in-flight requests than maxContextDisplayed.
// (Original French comments translated inline below.)
@Test public void testHtmlCounterRequestContext() throws IOException { // case: counterReportsByCounterName is null assertNotNull("HtmlCounterRequestContextReport", new HtmlCounterRequestContextReport( Collections.emptyList(), null, Collections.emptyList(), true, 500, writer)); // case: no request currently in flight final HtmlCounterRequestContextReport report = new HtmlCounterRequestContextReport( Collections.emptyList(), Collections.emptyMap(), Collections.emptyList(), true, 500, writer); report.toHtml(); assertNotEmptyAndClear(writer); // case: number of in-flight requests > maxContextDisplayed final List<CounterRequestContext> counterRequestContexts = Collections .singletonList(new CounterRequestContext(sqlCounter, null, "Test", "Test", null, null, -1, -1, "sessionId")); final HtmlCounterRequestContextReport report2 = new HtmlCounterRequestContextReport( counterRequestContexts, null, Collections.emptyList(), true, 0, writer); report2.toHtml(); assertNotEmptyAndClear(writer); // writeTitleAndDetails report2.writeTitleAndDetails(); }
/**
 * Upserts an OAuth2 approval record for (user, client, scope).
 * Tries an update first; when no row was affected the record does not exist yet and
 * is inserted instead.
 */
@VisibleForTesting void saveApprove(Long userId, Integer userType, String clientId, String scope, Boolean approved, LocalDateTime expireTime) { // Try the update first. OAuth2ApproveDO approveDO = new OAuth2ApproveDO().setUserId(userId).setUserType(userType) .setClientId(clientId).setScope(scope).setApproved(approved).setExpiresTime(expireTime); if (oauth2ApproveMapper.update(approveDO) == 1) { return; } // Update affected no row, so the record does not exist yet: insert it. oauth2ApproveMapper.insert(approveDO); }
// Verifies the update path of the upsert: with an existing row, saveApprove mutates it
// in place (same id, all fields updated) instead of inserting a second row.
// (Original Chinese section comments translated inline below.)
@Test public void testSaveApprove_update() { // mock data OAuth2ApproveDO approve = randomPojo(OAuth2ApproveDO.class); oauth2ApproveMapper.insert(approve); // prepare arguments Long userId = approve.getUserId(); Integer userType = approve.getUserType(); String clientId = approve.getClientId(); String scope = approve.getScope(); Boolean approved = randomBoolean(); LocalDateTime expireTime = LocalDateTime.ofInstant(randomDay(1, 30).toInstant(), ZoneId.systemDefault()); // no extra mocking needed // invoke oauth2ApproveService.saveApprove(userId, userType, clientId, scope, approved, expireTime); // assert List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList(); assertEquals(1, result.size()); assertEquals(approve.getId(), result.get(0).getId()); assertEquals(userId, result.get(0).getUserId()); assertEquals(userType, result.get(0).getUserType()); assertEquals(clientId, result.get(0).getClientId()); assertEquals(scope, result.get(0).getScope()); assertEquals(approved, result.get(0).getApproved()); assertEquals(expireTime, result.get(0).getExpiresTime()); }
@Private @VisibleForTesting int pickDirectory(long randomPosition, final long[] availableOnDisk) { int dir = 0; // skip zero available space directory, // because totalAvailable is greater than 0 and randomPosition // is less than totalAvailable, we can find a valid directory // with nonzero available space. while (availableOnDisk[dir] == 0L) { dir++; } while (randomPosition >= availableOnDisk[dir]) { randomPosition -= availableOnDisk[dir++]; } return dir; }
// Exercises the weighted directory picker at every bucket boundary, including an
// interior zero-capacity directory (index 1 in the second fixture has only 10 bytes).
@Test public void testPickDirectory() throws Exception { Configuration conf = new Configuration(); FileContext lfs = FileContext.getLocalFSFileContext(conf); DefaultContainerExecutor executor = new DefaultContainerExecutor(lfs); long[] availableOnDisk = new long[2]; availableOnDisk[0] = 100; availableOnDisk[1] = 100; assertEquals(0, executor.pickDirectory(0L, availableOnDisk)); assertEquals(0, executor.pickDirectory(99L, availableOnDisk)); assertEquals(1, executor.pickDirectory(100L, availableOnDisk)); assertEquals(1, executor.pickDirectory(101L, availableOnDisk)); assertEquals(1, executor.pickDirectory(199L, availableOnDisk)); long[] availableOnDisk2 = new long[5]; availableOnDisk2[0] = 100; availableOnDisk2[1] = 10; availableOnDisk2[2] = 400; availableOnDisk2[3] = 200; availableOnDisk2[4] = 350; assertEquals(0, executor.pickDirectory(0L, availableOnDisk2)); assertEquals(0, executor.pickDirectory(99L, availableOnDisk2)); assertEquals(1, executor.pickDirectory(100L, availableOnDisk2)); assertEquals(1, executor.pickDirectory(105L, availableOnDisk2)); assertEquals(2, executor.pickDirectory(110L, availableOnDisk2)); assertEquals(2, executor.pickDirectory(259L, availableOnDisk2)); assertEquals(3, executor.pickDirectory(700L, availableOnDisk2)); assertEquals(4, executor.pickDirectory(710L, availableOnDisk2)); assertEquals(4, executor.pickDirectory(910L, availableOnDisk2)); }
/**
 * Wraps a watermark estimator in the matching thread-safe adapter.
 * Order matters: the timestamp-observing check runs first so an estimator that is both
 * timestamp-observing and manual gets the timestamp-observing wrapper.
 */
public static <WatermarkEstimatorStateT> WatermarkAndStateObserver<WatermarkEstimatorStateT> threadSafe( WatermarkEstimator<WatermarkEstimatorStateT> watermarkEstimator) { if (watermarkEstimator instanceof TimestampObservingWatermarkEstimator) { return new ThreadSafeTimestampObservingWatermarkEstimator<>(watermarkEstimator); } else if (watermarkEstimator instanceof ManualWatermarkEstimator) { return new ThreadSafeManualWatermarkEstimator<>(watermarkEstimator); } return new ThreadSafeWatermarkEstimator<>(watermarkEstimator); }
// Verifies that wrapping a MonotonicallyIncreasing (timestamp-observing) estimator
// yields an observer whose state snapshots stay consistent under a competing thread.
@Test public void testThreadSafeTimestampObservingWatermarkEstimator() throws Exception { WatermarkEstimators.WatermarkAndStateObserver<Instant> threadsafeWatermarkEstimator = WatermarkEstimators.threadSafe( new org.apache.beam.sdk.transforms.splittabledofn.WatermarkEstimators .MonotonicallyIncreasing(GlobalWindow.TIMESTAMP_MIN_VALUE)); testWatermarkEstimatorSnapshotsStateWithCompetingThread( threadsafeWatermarkEstimator, ((TimestampObservingWatermarkEstimator) threadsafeWatermarkEstimator)::observeTimestamp); }
/**
 * Logs a formatted warning message via android.util.Log.
 */
@Override public void w(String tag, String message, Object... args) { Log.w(tag, formatString(message, args)); }
// Verifies the throwable-carrying warning overload: the formatted message and the
// throwable must both reach the WARN log.
@Test public void warningWithThrowableLoggedCorrectly() { String expectedMessage = "Hello World"; Throwable t = new Throwable("Test Throwable"); logger.w(t, tag, "Hello %s", "World"); assertLogged(WARN, tag, expectedMessage, t); }
/**
 * Turns a config id into a safe flat file name: trims surrounding whitespace and
 * collapses path-ish separators (backslash, slash, comma, semicolon) into dots.
 */
static String sanitizeFileName(String s) {
    // One character class covers all four separators; "\\\\" is a single literal
    // backslash at the regex level.
    return s.trim().replaceAll("[\\\\/,;]", ".");
}
// Verifies slash-to-dot conversion and trailing-whitespace trimming of config ids.
@Test void testConfigId2FileName() { assertEquals("admin.metrics.2088223-v6-1.ostk.bm2.prod.ne1.yahoo.com", sanitizeFileName("admin/metrics/2088223-v6-1.ostk.bm2.prod.ne1.yahoo.com")); assertEquals("admin.standalone.cluster-controllers.1", sanitizeFileName("admin/standalone/cluster-controllers/1 ")); }
/**
 * Starts the application, auto-detecting the caller class by scanning the current
 * stack for the frame whose method is "main". Falls back to null (no caller) when no
 * such frame exists, e.g. when started from a test.
 *
 * @param args command-line arguments
 * @return this Blade instance
 */
public Blade start(String... args) { Class<?> caller = Arrays.stream(Thread.currentThread().getStackTrace()) .filter(st -> "main".equals(st.getMethodName())) .findFirst() .map(StackTraceElement::getClassName) .map(UncheckedFnKit.function(Class::forName)) .orElse(null); return this.start(caller, args); }
// Smoke test: starting with an explicit caller class and null args boots and stops
// cleanly.
@Test public void testStart() { String[] args = null; Blade start = Blade.create().start(Hello.class, args); start.stop(); }
/**
 * Moves the group to a new state after validating the transition is legal.
 * Records the previous state, stamps the transition time, and notifies metrics.
 * Note: assertValidTransition throws before any state is mutated, so an illegal
 * transition leaves the group unchanged.
 */
public void transitionTo(ClassicGroupState groupState) { assertValidTransition(groupState); previousState = state; state = groupState; currentStateTimestamp = Optional.of(time.milliseconds()); metrics.onClassicGroupStateTransition(previousState, state); }
// Verifies that a self-transition PREPARING_REBALANCE -> PREPARING_REBALANCE is
// rejected with IllegalStateException.
@Test public void testPreparingRebalanceToPreparingRebalanceIllegalTransition() { group.transitionTo(PREPARING_REBALANCE); assertThrows(IllegalStateException.class, () -> group.transitionTo(PREPARING_REBALANCE)); }
/**
 * REST health endpoint. Asks the herder for a health check with a bounded wait:
 * success -> 200 healthy; timeout while the herder is still starting -> 503 starting;
 * timeout after startup -> 500 unhealthy (both carrying the timed-out stage summary
 * when available); any other herder failure is unwrapped and rethrown.
 */
@GET @Path("/health") @Operation(summary = "Health check endpoint to verify worker readiness and liveness") public Response healthCheck() throws Throwable { WorkerStatus workerStatus; int statusCode; try { FutureCallback<Void> cb = new FutureCallback<>(); herder.healthCheck(cb); long timeoutNs = TimeUnit.MILLISECONDS.toNanos(requestTimeout.healthCheckTimeoutMs()); long deadlineNs = timeoutNs + time.nanoseconds(); time.waitForFuture(cb, deadlineNs); statusCode = Response.Status.OK.getStatusCode(); workerStatus = WorkerStatus.healthy(); } catch (TimeoutException e) { String statusDetails = e instanceof StagedTimeoutException ? ((StagedTimeoutException) e).stage().summarize() : null; if (!herder.isReady()) { statusCode = Response.Status.SERVICE_UNAVAILABLE.getStatusCode(); workerStatus = WorkerStatus.starting(statusDetails); } else { statusCode = Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(); workerStatus = WorkerStatus.unhealthy(statusDetails); } } catch (ExecutionException e) { throw e.getCause(); } return Response.status(statusCode).entity(workerStatus).build(); }
// Verifies the 503 "starting" path: a staged timeout while the herder is not ready
// must return SERVICE_UNAVAILABLE with the stage description in the status message.
@Test public void testHealthCheckStartingWithStage() throws Throwable { String stageDescription = "experiencing a simulated failure for testing purposes"; Stage stage = new Stage(stageDescription, 0); StagedTimeoutException exception = new StagedTimeoutException(stage); expectHealthCheck(exception); when(herder.isReady()).thenReturn(false); Response response = rootResource.healthCheck(); assertEquals(Response.Status.SERVICE_UNAVAILABLE.getStatusCode(), response.getStatus()); WorkerStatus expectedStatus = WorkerStatus.starting(stage.summarize()); WorkerStatus actualStatus = workerStatus(response); assertEquals(expectedStatus, actualStatus); assertTrue( actualStatus.message().contains(stageDescription), "Status message '" + actualStatus.message() + "' did not contain stage description '" + stageDescription + "'" ); }
/**
 * Returns the metric group for the given name and tag key/value pairs, creating it on
 * first use. Concurrent callers racing to create the same group all observe the single
 * instance that won the putIfAbsent race.
 */
public MetricGroup group(String groupName, String... tagKeyValues) {
    MetricGroupId id = groupId(groupName, tagKeyValues);
    MetricGroup existing = groupsByName.get(id);
    if (existing != null) {
        return existing;
    }
    // Not present: create one, but defer to whichever instance wins the race.
    MetricGroup created = new MetricGroup(id);
    MetricGroup raced = groupsByName.putIfAbsent(id, created);
    return raced != null ? raced : created;
}
// Verifies group identity semantics: repeated lookups with the same name (and same
// tags) return the same instance, while different names or added tags yield distinct
// groups.
@Test public void testGettingGroupMultipleTimes() { MetricGroup group1 = metrics.group("name"); MetricGroup group2 = metrics.group("name"); assertNotNull(group1); assertSame(group1, group2); MetricGroup group3 = metrics.group("other"); assertNotNull(group3); assertNotSame(group1, group3); // Now with tags MetricGroup group4 = metrics.group("name", "k1", "v1"); assertNotNull(group4); assertNotSame(group1, group4); assertNotSame(group2, group4); assertNotSame(group3, group4); MetricGroup group5 = metrics.group("name", "k1", "v1"); assertSame(group4, group5); }
/**
 * Builds a dispatch-criteria string from URI query parameters.
 * Parameters are URL-decoded, sorted by key (TreeMultimap), filtered to those whose
 * key appears in {@code paramsRule}, and appended as "?key=value" fragments — note the
 * deliberate use of '?' (not '&amp;') as the separator between every entry. Parameters
 * with an empty value (no text after '=') are skipped because split yields one part.
 * Returns "" when the URI carries no query parameters.
 */
public static String extractFromURIParams(String paramsRule, String uri) { Multimap<String, String> criteriaMap = TreeMultimap.create(); if (uri.contains("?") && uri.contains("=")) { String parameters = uri.substring(uri.indexOf("?") + 1); for (String parameter : parameters.split("&")) { String[] pair = parameter.split("="); if (pair.length > 1) { String key = URLDecoder.decode(pair[0], StandardCharsets.UTF_8); String value = URLDecoder.decode(pair[1], StandardCharsets.UTF_8); criteriaMap.put(key, value); } } // Just appends sorted entries, separating them with ?. StringBuilder result = new StringBuilder(); for (Map.Entry<String, String> criteria : criteriaMap.entries()) { if (paramsRule.contains(criteria.getKey())) { result.append("?").append(criteria.getKey()).append("=").append(criteria.getValue()); } } return result.toString(); } return ""; }
// Verifies that query parameters with empty values are ignored: only param1 carries a
// value, so only it contributes to the dispatch criteria despite the rule naming two.
@Test void testExtractFromURIParamsWithEmpty() { // Check with parameters that allows empty. String requestPath = "/search?param1=test&param2=&param3="; // Only 1 parameter should be taken into account according to rules. String dispatchCriteria = DispatchCriteriaHelper.extractFromURIParams("param1 && param2", requestPath); assertEquals("?param1=test", dispatchCriteria); }
/**
 * Authenticates a user by email and password and issues a token from the user's
 * claims.
 *
 * @throws UserNotFoundException when no user matches the email
 * @throws PasswordNotValidException when the password does not match the stored hash
 */
@Override public Token login(LoginRequest loginRequest) { final UserEntity userEntityFromDB = userRepository .findUserEntityByEmail(loginRequest.getEmail()) .orElseThrow( () -> new UserNotFoundException("Can't find with given email: " + loginRequest.getEmail()) ); if (Boolean.FALSE.equals(passwordEncoder.matches( loginRequest.getPassword(), userEntityFromDB.getPassword()))) { throw new PasswordNotValidException(); } return tokenService.generateToken(userEntityFromDB.getClaims()); }
// Verifies the wrong-password path: an existing user with a non-matching password
// raises PasswordNotValidException and the token service is never consulted.
@Test void login_InvalidPassword_ThrowsPasswordNotValidException() { // Given LoginRequest loginRequest = LoginRequest.builder() .email("test@example.com") .password("invalidPassword") .build(); UserEntity userEntity = new UserEntityBuilder() .withEmail(loginRequest.getEmail()) .withPassword("encodedPassword") .build(); // When when(userRepository.findUserEntityByEmail(loginRequest.getEmail())) .thenReturn(Optional.of(userEntity)); when(passwordEncoder.matches(loginRequest.getPassword(), userEntity.getPassword())) .thenReturn(false); // Then PasswordNotValidException exception = assertThrows(PasswordNotValidException.class, () -> userLoginService.login(loginRequest)); assertNotNull(exception); // Verify verify(userRepository).findUserEntityByEmail(loginRequest.getEmail()); verify(passwordEncoder).matches(loginRequest.getPassword(), userEntity.getPassword()); verifyNoInteractions(tokenService); }
/**
 * Parses a textual file size such as {@code "8"}, {@code "8 kb"}, {@code "12 mb"}
 * or {@code "5 gb"} (unit matching is case-insensitive) into a {@code FileSize}.
 *
 * @param fileSizeStr the textual representation to parse
 * @return the parsed size, in bytes
 * @throws IllegalArgumentException if the string does not match the expected format
 * @throws IllegalStateException if the pattern matched a unit this method does not know
 */
static public FileSize valueOf(String fileSizeStr) {
    Matcher matcher = FILE_SIZE_PATTERN.matcher(fileSizeStr);

    // Guard clause: bail out early on malformed input instead of nesting the happy path.
    if (!matcher.matches()) {
        throw new IllegalArgumentException("String value [" + fileSizeStr + "] is not in the expected format.");
    }

    String lenStr = matcher.group(DOUBLE_GROUP);
    String unitStr = matcher.group(UNIT_GROUP);

    // parseLong avoids the needless boxing of Long.valueOf.
    long lenValue = Long.parseLong(lenStr);

    // Map the (case-insensitive) unit suffix to its byte coefficient.
    long coefficient;
    if (unitStr.equalsIgnoreCase("")) {
        coefficient = 1;
    } else if (unitStr.equalsIgnoreCase("kb")) {
        coefficient = KB_COEFFICIENT;
    } else if (unitStr.equalsIgnoreCase("mb")) {
        coefficient = MB_COEFFICIENT;
    } else if (unitStr.equalsIgnoreCase("gb")) {
        coefficient = GB_COEFFICIENT;
    } else {
        // Unreachable unless FILE_SIZE_PATTERN admits more units than handled here.
        throw new IllegalStateException("Unexpected " + unitStr);
    }
    return new FileSize(lenValue * coefficient);
}
@Test
public void testValueOf() {
    // A bare number with no unit suffix is interpreted as bytes.
    {
        FileSize fs = FileSize.valueOf("8");
        assertEquals(8, fs.getSize());
    }

    // Unit suffixes are case-insensitive and, per these fixtures, apparently
    // tolerate a trailing 's' ("kbs", "GBs").
    {
        FileSize fs = FileSize.valueOf("8 kbs");
        assertEquals(8 * KB_CO, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("8 kb");
        assertEquals(8 * KB_CO, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("12 mb");
        assertEquals(12 * MB_CO, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("5 GBs");
        assertEquals(5 * GB_CO, fs.getSize());
    }
}
/**
 * Selects namespace bundles to unload from the most-loaded brokers (optionally
 * transferring them to the least-loaded broker), based on per-broker weighted-max
 * EMA load and the reported top-bundles load data. Returns the cached set of
 * decisions made this cycle; the cache is cleared at the start of every call.
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
        Map<String, Long> recentlyUnloadedBundles,
        Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        // Without the broker list no decision can be made this cycle.
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);

        // Refresh min/max broker load statistics; a present skip reason aborts the cycle.
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. 
Reason:{}.", skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }

        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;

        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Count consecutive cycles in which an unload condition is observed; shedding
        // only starts once the count exceeds the configured threshold (debouncing).
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }

        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount,
                        conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }

        // Repeatedly pick the currently most-loaded broker and shed from it until
        // no transferable broker pair remains or the cluster meets the target.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. 
" + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                            + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }

            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Target: offload half of the load gap between max and min broker,
            // converted into a throughput budget proportional to max broker load.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;

            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB));
            }

            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;

            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            // A single bundle cannot be shed without leaving the broker empty.
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent()
                    ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }

            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                // Skip bundles this broker no longer owns (ownership may have moved).
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                // Skip bundles unloaded recently to avoid thrashing.
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                // Skip bundles whose placement policies forbid this transfer.
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                // Always leave at least one bundle on the max broker.
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                + " less than or equal to 1.", bundle, maxBroker));
                    }
                    break;
                }

                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            // Only accept a swap candidate if neither side would exceed the
                            // max broker's current throughput.
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        // Offloading this bundle would overshoot the budget and no
                        // compensating swap was found: stop shedding from this broker.
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                    + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                    + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        // Record the min->max swap decisions accepted above.
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                            // NOTE(review): trafficMarkedToGain is only accumulated when
                            // debugMode is enabled — this looks unintended; verify whether
                            // the accumulation should sit outside the debug block.
                            trafficMarkedToGain += minBrokerBundleSwapThroughput;
                        }
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                            + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                            + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB, trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Feed the simulated offload back into the stats so the next loop
                // iteration sees the adjusted broker loads.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. 
" + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end

        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }

        // No decision at all: record the most specific skip reason we observed.
        if (decisionCache.isEmpty()) {
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }

    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
@Test
public void testMaxNumberOfTransfersPerShedderCycle() {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder shedder = new TransferShedder(counter);

    // Allow up to 10 broker sheddings per cycle.
    var context = setupContext();
    context.brokerConfiguration().setLoadBalancerMaxNumberOfBrokerSheddingPerCycle(10);

    var actual = shedder.findBundlesForUnloading(context, Map.of(), Map.of());

    // Both overloaded brokers should transfer one bundle each to an underloaded peer.
    var expected = new HashSet<UnloadDecision>();
    expected.add(new UnloadDecision(
            new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded));
    expected.add(new UnloadDecision(
            new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded));
    assertEquals(actual, expected);

    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
/**
 * Maps any throwable to a JSON error response: WebApplicationExceptions keep
 * their own HTTP status, everything else falls back to the default status.
 */
@Override
public Response toResponse(Throwable t) {
    final int status = (t instanceof WebApplicationException)
            ? ((WebApplicationException) t).getResponse().getStatus()
            : DEFAULT_STATUS;

    // Wrap the status and exception message in a serializable error payload.
    return Response.status(status)
            .entity(new SimpleHttpErrorInfo(status, t.getMessage()))
            .type(MediaType.APPLICATION_JSON)
            .build();
}
@Test public void testToResponseWithGenericException() { // Arrange Throwable throwable = new RuntimeException("Internal Server Error"); // Act Response response = _exceptionMapper.toResponse(throwable); // Assert assertEquals(response.getStatus(), 500); SimpleHttpErrorInfo errorInfo = (SimpleHttpErrorInfo) response.getEntity(); assertEquals(errorInfo.getCode(), 500); assertEquals(errorInfo.getError(), "Internal Server Error"); }
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getVirtualNetworks() {
    // Gather the virtual networks of every known tenant into one flat list.
    List<VirtualNetwork> allVnets = vnetAdminService.getTenantIds().stream()
            .flatMap(tenantId -> vnetService.getVirtualNetworks(tenantId).stream())
            .collect(Collectors.toList());
    return ok(encodeArray(VirtualNetwork.class, "vnets", allVnets)).build();
}
@Test
public void testGetVirtualNetworksByNonExistentTenantId() {
    String tenantIdName = "NON_EXISTENT_TENANT_ID";
    // Only tenantId3 exists; the tenant requested below is unknown to the service.
    expect(mockVnetAdminService.getTenantIds()).andReturn(ImmutableSet.of(tenantId3)).anyTimes();
    replay(mockVnetAdminService);
    expect(mockVnetService.getVirtualNetworks(anyObject())).andReturn(ImmutableSet.of()).anyTimes();
    replay(mockVnetService);

    WebTarget wt = target();
    try {
        // Requesting the vnets of a non-existent tenant must yield HTTP 404.
        wt.path("vnets/" + tenantIdName)
                .request()
                .get(String.class);
        fail("Get of a non-existent virtual network did not throw an exception");
    } catch (NotFoundException ex) {
        assertThat(ex.getMessage(), containsString("HTTP 404 Not Found"));
    }

    // Confirm the mocked services were exercised as recorded above.
    verify(mockVnetService);
    verify(mockVnetAdminService);
}
/**
 * Converts a Flink table {@code Expression} into an equivalent Iceberg
 * {@link Expression}, when the call maps to a supported operation.
 * Returns {@link Optional#empty()} for anything that cannot be converted.
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
  // Only function-call expressions can map to Iceberg predicates.
  if (!(flinkExpression instanceof CallExpression)) {
    return Optional.empty();
  }

  CallExpression call = (CallExpression) flinkExpression;
  Operation op = FILTERS.get(call.getFunctionDefinition());
  if (op != null) {
    switch (op) {
      case IS_NULL:
        // IS NULL / NOT NULL take a single field-reference child.
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::isNull);

      case NOT_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::notNull);

      // For comparisons, the second function handles the literal-on-the-left
      // form (e.g. 5 < x becomes x > 5).
      case LT:
        return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);

      case LT_EQ:
        return convertFieldAndLiteral(
            Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);

      case GT:
        return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);

      case GT_EQ:
        return convertFieldAndLiteral(
            Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);

      case EQ:
        // Equality against NaN must become isNaN (NaN never equals anything).
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.isNaN(ref);
              } else {
                return Expressions.equal(ref, lit);
              }
            },
            call);

      case NOT_EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.notNaN(ref);
              } else {
                return Expressions.notEqual(ref, lit);
              }
            },
            call);

      case NOT:
        // NOT wraps a single nested call: convert it recursively, then negate.
        return onlyChildAs(call, CallExpression.class)
            .flatMap(FlinkFilters::convert)
            .map(Expressions::not);

      case AND:
        return convertLogicExpression(Expressions::and, call);

      case OR:
        return convertLogicExpression(Expressions::or, call);

      case STARTS_WITH:
        return convertLike(call);
    }
  }

  return Optional.empty();
}
@Test
public void testEqualsNaN() {
    UnboundPredicate<Float> expected = org.apache.iceberg.expressions.Expressions.isNaN("field3");

    // field = NaN must convert to isNaN regardless of operand order.
    Optional<org.apache.iceberg.expressions.Expression> fieldOnLeft =
        FlinkFilters.convert(resolve(Expressions.$("field3").isEqual(Expressions.lit(Float.NaN))));
    assertThat(fieldOnLeft).isPresent();
    assertPredicatesMatch(expected, fieldOnLeft.get());

    Optional<org.apache.iceberg.expressions.Expression> fieldOnRight =
        FlinkFilters.convert(resolve(Expressions.lit(Float.NaN).isEqual(Expressions.$("field3"))));
    assertThat(fieldOnRight).isPresent();
    assertPredicatesMatch(expected, fieldOnRight.get());
}
/**
 * Verifies that, in sequential mode, every configured response was consumed.
 *
 * @throws VerificationAssertionError if configured responses remain unconsumed
 */
public void verifyStatus() {
    // Only sequential mocks track an ordered list of expected responses.
    if (sequential) {
        if (responseIterator == null) {
            // Iterator never opened: executions are missing iff responses were configured.
            // (The previous form dereferenced a null responseIterator when the mock was
            // sequential but had no responses configured, throwing NullPointerException.)
            if (!responses.isEmpty()) {
                throw new VerificationAssertionError("More executions were expected");
            }
        } else if (responseIterator.hasNext()) {
            throw new VerificationAssertionError("More executions were expected");
        }
    }
}
@Test
void paramsEncoding() {
    // The space in the first argument must be URL-encoded by the client.
    List<Contributor> result = github.contributors("7 7", "netflix", "feign");

    assertThat(result).hasSize(30);
    mockClient.verifyStatus();
}
public static Method getApplyMethod(ScalarFn scalarFn) { Class<? extends ScalarFn> clazz = scalarFn.getClass(); Collection<Method> matches = ReflectHelpers.declaredMethodsWithAnnotation( ScalarFn.ApplyMethod.class, clazz, ScalarFn.class); if (matches.isEmpty()) { throw new IllegalArgumentException( String.format( "No method annotated with @%s found in class %s.", ScalarFn.ApplyMethod.class.getSimpleName(), clazz.getName())); } // If we have at least one match, then either it should be the only match // or it should be an extension of the other matches (which came from parent // classes). Method first = matches.iterator().next(); for (Method other : matches) { if (!first.getName().equals(other.getName()) || !Arrays.equals(first.getParameterTypes(), other.getParameterTypes())) { throw new IllegalArgumentException( String.format( "Found multiple methods annotated with @%s. [%s] and [%s]", ScalarFn.ApplyMethod.class.getSimpleName(), ReflectHelpers.formatMethod(first), ReflectHelpers.formatMethod(other))); } } // Method must be public. if ((first.getModifiers() & Modifier.PUBLIC) == 0) { throw new IllegalArgumentException( String.format("Method %s is not public.", ReflectHelpers.formatMethod(first))); } return first; }
@Test
public void testMissingAnnotationThrowsIllegalArgumentException() {
    // A ScalarFn with no @ApplyMethod-annotated method must be rejected.
    thrown.expect(instanceOf(IllegalArgumentException.class));
    thrown.expectMessage("No method annotated with @ApplyMethod found in class");

    ScalarFnReflector.getApplyMethod(new IncrementFnMissingAnnotation());
}
static void validate(KafkaConsumer<byte[], byte[]> consumer, byte[] message, ConsumerRecords<byte[], byte[]> records) { if (records.isEmpty()) { consumer.commitSync(); throw new RuntimeException("poll() timed out before finding a result (timeout:[" + POLL_TIMEOUT_MS + "])"); } //Check result matches the original record String sent = new String(message, StandardCharsets.UTF_8); String read = new String(records.iterator().next().value(), StandardCharsets.UTF_8); if (!read.equals(sent)) { consumer.commitSync(); throw new RuntimeException("The message read [" + read + "] did not match the message sent [" + sent + "]"); } //Check we only got the one message if (records.count() != 1) { int count = records.count(); consumer.commitSync(); throw new RuntimeException("Only one result was expected during this test. We found [" + count + "]"); } }
@Test
@SuppressWarnings("unchecked")
public void shouldPassInValidation() {
    // Mock a single-record poll result whose payload matches the sent message.
    Iterator<ConsumerRecord<byte[], byte[]>> iterator = mock(Iterator.class);
    ConsumerRecord<byte[], byte[]> record = mock(ConsumerRecord.class);
    when(records.isEmpty()).thenReturn(false);
    when(records.iterator()).thenReturn(iterator);
    when(iterator.next()).thenReturn(record);
    when(record.value()).thenReturn("kafkaa".getBytes(StandardCharsets.UTF_8));
    when(records.count()).thenReturn(1);

    // A matching, single record must pass validation without throwing.
    assertDoesNotThrow(() ->
            EndToEndLatency.validate(consumer, "kafkaa".getBytes(StandardCharsets.UTF_8), records));
}
/**
 * Creates a {@code BoundedSource} producing the longs from {@code 0} (inclusive)
 * up to {@code numElements} (exclusive).
 *
 * @param numElements exclusive upper bound; must be non-negative
 * @return a bounded counting source starting at 0
 * @deprecated marked deprecated upstream — presumably superseded by a newer
 *     counting/sequence transform; consult the class documentation for the replacement
 */
@Deprecated
public static BoundedSource<Long> upTo(long numElements) {
  checkArgument(
      numElements >= 0, "numElements (%s) must be greater than or equal to 0", numElements);
  return new BoundedCountingSource(0, numElements);
}
@Test
@Category({
  ValidatesRunner.class,
  UsesStatefulParDo.class // This test fails if State is unsupported despite no direct usage.
})
public void testBoundedSourceSplits() throws Exception {
  long numElements = 1000;
  long numSplits = 10;
  // Desired bundle size chosen so the source splits into exactly numSplits pieces.
  long splitSizeBytes = numElements * 8 / numSplits; // 8 bytes per long element.

  BoundedSource<Long> initial = CountingSource.upTo(numElements);
  List<? extends BoundedSource<Long>> splits = initial.split(splitSizeBytes, p.getOptions());
  assertEquals("Expected exact splitting", numSplits, splits.size());

  // Assemble all the splits into one flattened PCollection, also verify their sizes.
  PCollectionList<Long> pcollections = PCollectionList.empty(p);
  for (int i = 0; i < splits.size(); ++i) {
    BoundedSource<Long> split = splits.get(i);
    pcollections = pcollections.and(p.apply("split" + i, Read.from(split)));
    assertEquals(
        "Expected even splitting", splitSizeBytes, split.getEstimatedSizeBytes(p.getOptions()));
  }
  PCollection<Long> input = pcollections.apply(Flatten.pCollections());

  // The union of all splits must still contain exactly the original elements.
  addCountingAsserts(input, numElements);
  p.run();
}
/**
 * Looks up a language by key; an unknown key yields an empty Optional.
 */
@Override
public Optional<Language> find(String languageKey) {
    Language language = languagesByKey.get(languageKey);
    return language == null ? Optional.empty() : Optional.of(language);
}
@Test
public void find_on_empty_LanguageRepository_returns_absent() {
    // A repository with no registered languages cannot resolve any key.
    LanguageRepositoryImpl emptyRepository = new LanguageRepositoryImpl();

    assertThat(emptyRepository.find(ANY_KEY)).isEmpty();
}
/**
 * Evaluates the predicate and projection against every row in the stream.
 * Rows rejected by the predicate come back as null from the per-row overload
 * and are dropped from the result.
 */
@Nonnull
public static List<JetSqlRow> evaluate(
        @Nullable Expression<Boolean> predicate,
        @Nullable List<Expression<?>> projection,
        @Nonnull Stream<JetSqlRow> rows,
        @Nonnull ExpressionEvalContext context
) {
    return rows
            .map(row -> evaluate(predicate, projection, row, context))
            .filter(row -> row != null)
            .collect(Collectors.toList());
}
@Test
public void test_evaluateWithPredicateAndProjection() {
    // Three input rows; the predicate drops the row whose first column equals 1,
    // and the projection doubles the first column of the survivors.
    List<Object[]> rows = asList(new Object[]{0, "a"}, new Object[]{1, "b"}, new Object[]{2, "c"});

    Expression<Boolean> predicate = new FunctionalPredicateExpression(row -> {
        int value = row.get(0);
        return value != 1;
    });
    MultiplyFunction<?> projection = MultiplyFunction.create(ColumnExpression.create(0, INT),
            ConstantExpression.create(2, INT), INT);

    List<JetSqlRow> evaluated = ExpressionUtil.evaluate(predicate, singletonList(projection),
            rows.stream().map(v -> new JetSqlRow(TEST_SS, v)), mock(ExpressionEvalContext.class));

    // Rows 0 and 2 survive; their first columns are projected to 0 and 4.
    assertThat(toList(evaluated, JetSqlRow::getValues)).containsExactly(new Object[]{0}, new Object[]{4});
}
/**
 * Proxies a FinishApplicationMaster request through the per-application
 * interceptor pipeline, recording request counts, latency on success, and a
 * failure metric on any error.
 */
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
    FinishApplicationMasterRequest request) throws YarnException, IOException {
  this.metrics.incrRequestCount();
  long startTime = clock.getTime();
  try {
    // Resolve (and authorize) the interceptor chain for the calling attempt.
    RequestInterceptorChainWrapper pipeline = authorizeAndGetInterceptorChain();
    LOG.info("Finishing application master for {}. Tracking Url: {}.",
        pipeline.getApplicationAttemptId(), request.getTrackingUrl());
    FinishApplicationMasterResponse response =
        pipeline.getRootInterceptor().finishApplicationMaster(request);

    long endTime = clock.getTime();
    this.metrics.succeededFinishAMRequests(endTime - startTime);
    LOG.info("FinishAM finished with isUnregistered = {} in {} ms for {}.",
        response.getIsUnregistered(), endTime - startTime,
        pipeline.getApplicationAttemptId());
    return response;
  } catch (Throwable t) {
    // Any failure (including authorization failures) counts toward the failure metric.
    this.metrics.incrFailedFinishAMRequests();
    throw t;
  }
}
@Test public void testFinishInvalidApplicationMaster() { try { // Try to finish an application master that was not registered. finishApplicationMaster(4, FinalApplicationStatus.SUCCEEDED); Assert .fail("The request to finish application master should have failed"); } catch (Throwable ex) { // This is expected. So nothing required here. LOG.info("Finish registration failed as expected because it was not registered"); } }
/**
 * Reads the plugin manifest ({@code META-INF/MANIFEST.MF}) from the given
 * bundled plugin archive.
 *
 * @param bundledJpi URL of the bundled plugin archive
 * @return the parsed manifest, or {@code null} when the archive has no
 *         manifest or cannot be read (the failure is logged)
 */
static @CheckForNull Manifest parsePluginManifest(URL bundledJpi) {
    // A throwaway classloader over just this archive lets us locate the
    // manifest resource inside it; try-with-resources closes the loader.
    try (URLClassLoader cl = new URLClassLoader("Temporary classloader for parsing " + bundledJpi.toString(),
            new URL[]{bundledJpi}, ClassLoader.getSystemClassLoader())) {
        InputStream in = null;
        try {
            URL res = cl.findResource(PluginWrapper.MANIFEST_FILENAME);
            if (res != null) {
                in = getBundledJpiManifestStream(res);
                return new Manifest(in);
            }
        } finally {
            // Close the manifest stream quietly; close failures are only logged.
            Util.closeAndLogFailures(in, LOGGER, PluginWrapper.MANIFEST_FILENAME, bundledJpi.toString());
        }
    } catch (IOException e) {
        LOGGER.log(WARNING, "Failed to parse manifest of " + bundledJpi, e);
    }
    return null;
}
@Test
public void shouldProperlyParseManifestFromJar() throws IOException {
    // Parse the manifest out of a sample .hpi archive created on disk.
    File jar = createHpiWithManifest();
    final Manifest manifest = PluginManager.parsePluginManifest(jar.toURI().toURL());

    assertThat("manifest should have been read from the sample", manifest, notNullValue());
    // Simple single-line attributes.
    assertAttribute(manifest, "Created-By", "Apache Maven");
    assertAttribute(manifest, "Short-Name", "matrix-auth");

    // Multi-line entries
    assertAttribute(manifest, "Specification-Title", "Offers matrix-based security authorization strategies (global and per-project).");
    assertAttribute(manifest, "Url", "http://wiki.jenkins-ci.org/display/JENKINS/Matrix+Authorization+Strategy+Plugin");

    // Empty field
    assertAttribute(manifest, "Plugin-Developers", null);
}
@ExecuteOn(TaskExecutors.IO)
@Post(uri = "/labels/by-ids")
@Operation(tags = {"Executions"}, summary = "Set labels on a list of executions")
@ApiResponse(responseCode = "200", description = "On success", content = {@Content(schema = @Schema(implementation = BulkResponse.class))})
@ApiResponse(responseCode = "422", description = "Killed with errors", content = {@Content(schema = @Schema(implementation = BulkErrorResponse.class))})
public MutableHttpResponse<?> setLabelsByIds(
    @Parameter(description = "The request") @Body SetLabelsByIdsRequest setLabelsByIds
) {
    // Validate every requested execution before touching any of them:
    // each must exist and must already be in a terminated state.
    List<Execution> executions = new ArrayList<>();
    Set<ManualConstraintViolation<String>> invalids = new HashSet<>();
    for (String executionId : setLabelsByIds.executionsId()) {
        Optional<Execution> execution = executionRepository.findById(tenantService.resolveTenant(), executionId);

        if (execution.isPresent() && !execution.get().getState().isTerminated()) {
            invalids.add(ManualConstraintViolation.of(
                "execution is not terminated",
                executionId,
                String.class,
                "execution",
                executionId
            ));
        } else if (execution.isEmpty()) {
            invalids.add(ManualConstraintViolation.of(
                "execution not found",
                executionId,
                String.class,
                "execution",
                executionId
            ));
        } else {
            executions.add(execution.get());
        }
    }

    // Any invalid id rejects the whole batch; nothing is modified.
    if (!invalids.isEmpty()) {
        // NOTE(review): the @ApiResponse above documents 422 for the error case,
        // but badRequest() returns 400 — confirm which status is intended.
        return HttpResponse.badRequest(BulkErrorResponse
            .builder()
            .message("invalid bulk set labels")
            .invalids(invalids)
            .build()
        );
    }

    // All executions validated: apply the requested labels to each of them.
    executions.forEach(execution -> setLabels(execution, setLabelsByIds.executionLabels()));
    return HttpResponse.ok(BulkResponse.builder().count(executions.size()).build());
}
@Test
void setLabelsByIds() {
    // Launch three executions whose labels will be updated in one bulk call.
    Execution first = triggerInputsFlowExecution(true);
    Execution second = triggerInputsFlowExecution(true);
    Execution third = triggerInputsFlowExecution(true);

    List<String> ids = List.of(first.getId(), second.getId(), third.getId());
    BulkResponse response = client.toBlocking().retrieve(
        HttpRequest.POST(
            "/api/v1/executions/labels/by-ids",
            new ExecutionController.SetLabelsByIdsRequest(ids, List.of(new Label("key", "value")))),
        BulkResponse.class
    );

    // All three executions must have been updated.
    assertThat(response.getCount(), is(3));
}
/**
 * Prints the status messages of the given context, starting from the beginning
 * of the status list (offset 0), using the shared printer instance.
 *
 * @param context the context whose status messages are printed
 */
public static void print(Context context) {
    SINGLETON.print(context, 0);
}
@Test
public void testBasic() {
    // Register a single INFO status and print the whole context.
    Context context = new ContextBase();
    context.getStatusManager().add(new InfoStatus("test", this));
    StatusPrinter.print(context);

    // The printed output must mention the INFO level and this test class.
    String printed = outputStream.toString();
    assertTrue(printed.contains("|-INFO in " + this.getClass().getName()));
}
/**
 * Scans the whole pattern, driving the scanner state machine one character at
 * a time, and returns the recognized token list.
 *
 * @return the tokens found in the pattern
 * @throws ScanException if the pattern ends while a format modifier or an
 *         option is still being read
 */
List<Token> tokenize() throws ScanException {
    List<Token> tokenList = new ArrayList<Token>();
    StringBuffer buf = new StringBuffer();

    while (pointer < patternLength) {
        char c = pattern.charAt(pointer);
        pointer++;

        // Dispatch the character to the handler for the current scanner state;
        // handlers mutate 'state', 'buf' and 'tokenList' as a side effect.
        switch (state) {
        case LITERAL_STATE:
            handleLiteralState(c, tokenList, buf);
            break;
        case FORMAT_MODIFIER_STATE:
            handleFormatModifierState(c, tokenList, buf);
            break;
        case OPTION_STATE:
            processOption(c, tokenList, buf);
            break;
        case KEYWORD_STATE:
            handleKeywordState(c, tokenList, buf);
            break;
        case RIGHT_PARENTHESIS_STATE:
            handleRightParenthesisState(c, tokenList, buf);
            break;
        default:
        }
    }

    // EOS: flush whatever the final state left in the buffer.
    switch (state) {
    case LITERAL_STATE:
        addValuedToken(Token.LITERAL, buf, tokenList);
        break;
    case KEYWORD_STATE:
        tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
        break;
    case RIGHT_PARENTHESIS_STATE:
        tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
        break;
    case FORMAT_MODIFIER_STATE:
    case OPTION_STATE:
        // A format modifier or option was started but never completed.
        throw new ScanException("Unexpected end of pattern string");
    }

    return tokenList;
}
@Test
public void compositedKeywordFollowedByOptions() throws ScanException {
    // %d(A){o} — a composite keyword with a literal child followed by an option list.
    List<Token> actual = new TokenStream("%d(A){o}", new AlmostAsIsEscapeUtil()).tokenize();

    List<String> options = new ArrayList<String>();
    options.add("o");

    List<Token> expected = new ArrayList<Token>();
    expected.add(Token.PERCENT_TOKEN);
    expected.add(new Token(Token.COMPOSITE_KEYWORD, "d"));
    expected.add(new Token(Token.LITERAL, "A"));
    expected.add(Token.RIGHT_PARENTHESIS_TOKEN);
    expected.add(new Token(Token.OPTION, options));

    assertEquals(expected, actual);
}
/**
 * Returns the origin URI configured for this distribution.
 *
 * @return the origin URI
 */
public URI getOrigin() {
    return origin;
}
@Test
public void testOriginCustom() {
    // The origin passed to the constructor must be returned verbatim.
    final URI origin = URI.create("test.cyberduck.ch");
    final Distribution distribution = new Distribution(Distribution.DOWNLOAD, origin, false);

    assertEquals(origin, distribution.getOrigin());
}
/**
 * Runs the statistic on the model's currently visible graph.
 */
@Override
public void execute(GraphModel graphModel) {
    execute(graphModel.getGraphVisible());
}
@Test
public void testSelfLoopDirectedGraphDegree() {
    // One node with a single self-loop: the loop counts once in, once out, twice total.
    GraphModel graphModel = GraphGenerator.generateSelfLoopDirectedGraph(1);
    DirectedGraph graph = graphModel.getDirectedGraph();
    Node node = graph.getNode("0");

    WeightedDegree degree = new WeightedDegree();
    degree.execute(graph);

    assertEquals(node.getAttribute(WeightedDegree.WDEGREE), 2.0);
    assertEquals(node.getAttribute(WeightedDegree.WINDEGREE), 1.0);
    assertEquals(node.getAttribute(WeightedDegree.WOUTDEGREE), 1.0);
}
/**
 * Returns a {@code TimestampedValues} transform producing the given
 * timestamped elements. The output coder and type descriptor are left
 * unspecified here (absent), to be inferred or set explicitly later.
 *
 * @param <T>   the element type
 * @param elems the elements, each paired with its timestamp
 */
public static <T> TimestampedValues<T> timestamped(Iterable<TimestampedValue<T>> elems) {
  return new TimestampedValues<>(elems, Optional.absent(), Optional.absent());
}
@Test
public void testCreateTimestampedEmptyUnspecifiedCoder() {
    p.enableAbandonedNodeEnforcement(false);

    // An empty timestamped Create has no elements to infer a Coder from, so the
    // failure message must point at every way of specifying one explicitly.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("determine a default Coder");
    thrown.expectMessage("Create.empty(Coder)");
    thrown.expectMessage("Create.empty(TypeDescriptor)");
    thrown.expectMessage("withCoder(Coder)");
    thrown.expectMessage("withType(TypeDescriptor)");

    p.apply(Create.timestamped(new ArrayList<>()));
}
/**
 * Tests whether the given textual IP address belongs to this machine list.
 * A wildcard list matches everything; unresolvable addresses are not included.
 *
 * @throws IllegalArgumentException if ipAddress is null (and the list is not a wildcard)
 */
public boolean includes(String ipAddress) {
    // Wildcard list matches everything, even before argument validation.
    if (all) {
        return true;
    }
    if (ipAddress == null) {
        throw new IllegalArgumentException("ipAddress is null.");
    }

    InetAddress resolved;
    try {
        resolved = addressFactory.getByName(ipAddress);
    } catch (UnknownHostException e) {
        // An address that cannot be resolved is simply not on the list.
        return false;
    }
    return includes(resolved);
}
// Exercises a MachineList built from a mix of plain IPs and CIDR ranges:
// listed addresses and CIDR range boundaries match; their immediate
// neighbours outside the ranges do not.
@Test
public void testIPandCIDRs() {
    // create MachineList with a list of ip ranges and ip addresses
    MachineList ml = new MachineList(IP_CIDR_LIST, new TestAddressFactory());
    // test for inclusion with a known IP
    assertTrue(ml.includes("10.119.103.112"));
    // test for exclusion with an unknown IP
    assertFalse(ml.includes("10.119.103.111"));
    // CIDR range boundaries (inclusive) and their outside neighbours
    assertFalse(ml.includes("10.221.255.255"));
    assertTrue(ml.includes("10.222.0.0"));
    assertTrue(ml.includes("10.222.255.255"));
    assertFalse(ml.includes("10.223.0.0"));
    assertFalse(ml.includes("10.241.22.255"));
    assertTrue(ml.includes("10.241.23.0"));
    assertTrue(ml.includes("10.241.23.255"));
    assertFalse(ml.includes("10.241.24.0"));
}
/**
 * Builds a proc result listing all cluster backends: header row from
 * {@code getMetadata()}, one copied row per backend.
 *
 * @throws AnalysisException if backend info retrieval fails
 */
@Override
public ProcResult fetchResult() throws AnalysisException {
    Preconditions.checkNotNull(clusterInfoService);
    BaseProcResult result = new BaseProcResult();
    result.setNames(getMetadata());
    for (List<String> backendInfo : getClusterBackendInfos()) {
        // Copy each row (via the copy constructor) so the result does not
        // alias the lists returned by getClusterBackendInfos().
        result.addRow(new ArrayList<>(backendInfo));
    }
    return result;
}
// In shared-data run mode, the tablet-number column of every backend row must
// report the shared-data tablet count. Uses JMockit Expectations to force
// RunMode.isSharedDataMode() to return true.
@Test
public void testFetchResultSharedData() throws AnalysisException {
    new Expectations() {
        {
            RunMode.isSharedDataMode();
            minTimes = 0;
            result = true;
        }
    };
    BackendsProcDir dir = new BackendsProcDir(systemInfoService);
    ProcResult result = dir.fetchResult();
    Assert.assertNotNull(result);
    Assert.assertTrue(result instanceof BaseProcResult);
    int columnIndex = getTabletNumColumnIndex(result.getColumnNames());
    Assert.assertTrue(columnIndex >= 0);
    for (List<String> row : result.getRows()) {
        Assert.assertEquals(String.valueOf(tabletNumSharedData), row.get(columnIndex));
    }
}
/**
 * Serializes this thumbnail as an empty element with a mandatory {@code uri}
 * attribute and optional media-type/width/height attributes.
 */
@Override
public CharSequence toXML(XmlEnvironment xmlEnvironment) {
    XmlStringBuilder xml = new XmlStringBuilder(this, xmlEnvironment);
    xml.attribute(ELEM_URI, uri);
    xml.optAttribute(ELEM_MEDIA_TYPE, mediaType);
    xml.optAttribute(ELEM_WIDTH, width);
    xml.optAttribute(ELEM_HEIGHT, height);
    return xml.closeEmptyElement();
}
// A thumbnail with all fields set must serialize every attribute
// (uri, media-type, width, height) on the empty <thumbnail/> element.
@Test
public void testFull() {
    ThumbnailElement full = new ThumbnailElement(
            "cid:sha1+ffd7c8d28e9c5e82afea41f97108c6b4@bob.xmpp.org",
            "image/png",
            128,
            96);
    assertXmlSimilar("<thumbnail xmlns='urn:xmpp:thumbs:1'\n" +
            "uri='cid:sha1+ffd7c8d28e9c5e82afea41f97108c6b4@bob.xmpp.org'\n" +
            "media-type='image/png'\n" +
            "width='128'\n" +
            "height='96'/>", full.toXML());
}
/**
 * Formats a channel's endpoints as {@code "local -> remote"} for logging.
 *
 * <p>Falls back to the addresses' own {@code toString()} when they are not
 * {@link InetSocketAddress}es (or are null).
 */
public static String channelToString(SocketAddress local1, SocketAddress remote1) {
    try {
        InetSocketAddress local = (InetSocketAddress) local1;
        InetSocketAddress remote = (InetSocketAddress) remote1;
        return toAddressString(local) + " -> " + toAddressString(remote);
    } catch (Exception e) {
        // Use the same " -> " separator as the happy path so log output stays
        // consistent (previously this branch used "->" without spaces).
        return local1 + " -> " + remote1;
    }
}
// TODO(review): this test body is empty and therefore always passes silently.
// Either implement assertions covering channelToString (normal
// InetSocketAddress input and the non-Inet fallback) or remove/disable it.
@Test
public void channelToString() throws Exception {
}
/**
 * Visits a menu entry: skips entries explicitly disallowed via the "allowed"
 * attribute, then builds either a submenu or a plain action item.
 */
@Override
public void visit(Entry entry) {
    // An explicit Boolean.FALSE "allowed" attribute suppresses the entry entirely.
    if (Boolean.FALSE.equals(entry.getAttribute("allowed"))) {
        return;
    }
    if (containsSubmenu(entry)) {
        addSubmenu(entry);
    } else {
        addActionItem(entry);
    }
}
// Making a submenu's popup visible must notify the popup listener about the
// child entries becoming visible. Skipped on macOS (behaves differently there).
@Test
public void whenPopupMenuBecomesVisible_itsChildActionPopupListenerIsCalled() {
    if (Compat.isMacOsX())
        return;
    menuEntry.addChild(actionEntry);
    menuActionGroupBuilder.visit(menuEntry);
    JMenu item = (JMenu) new EntryAccessor().getComponent(menuEntry);
    item.getPopupMenu().setVisible(true);
    verify(popupListener).childEntriesWillBecomeVisible(actionEntry);
}
/**
 * Overlays a BasicDeviceConfig on top of a DeviceDescription: any non-null
 * config field that differs from the description wins; everything else is
 * taken from the description unchanged.
 */
public static DeviceDescription combine(BasicDeviceConfig cfg, DeviceDescription descr) {
    if (cfg == null || descr == null) {
        return descr;
    }

    Device.Type type = descr.type();
    if (cfg.type() != null && cfg.type() != type) {
        type = cfg.type();
    }

    String manufacturer = preferConfig(cfg.manufacturer(), descr.manufacturer());
    String hwVersion = preferConfig(cfg.hwVersion(), descr.hwVersion());
    String swVersion = preferConfig(cfg.swVersion(), descr.swVersion());
    String serial = preferConfig(cfg.serial(), descr.serialNumber());

    SparseAnnotations sa = combine(cfg, descr.annotations());
    return new DefaultDeviceDescription(descr.deviceUri(), type, manufacturer,
                                        hwVersion, swVersion, serial,
                                        descr.chassisId(), descr.isDefaultAvailable(), sa);
}

/** Returns the config value when set and different; otherwise the description value. */
private static String preferConfig(String cfgValue, String descrValue) {
    return (cfgValue != null && !cfgValue.equals(descrValue)) ? cfgValue : descrValue;
}
// combine(): a null config returns the description unchanged; a config with a
// driver annotation overrides the driver; a full config overrides type,
// manufacturer, versions, serial and management address; a type-only config
// overrides just the device type.
@Test
public void testDescOps() {
    DeviceDescription desc = BasicDeviceOperator.combine(null, DEV1);
    assertEquals(desc, DEV1);

    // override driver name
    desc = BasicDeviceOperator.combine(SW_BDC, DEV1);
    assertEquals(NAME1, desc.annotations().value(AnnotationKeys.DRIVER));

    // override Device Information
    desc = BasicDeviceOperator.combine(RD_BDC, DEV1);
    assertEquals("Wrong type", ROADM, desc.type());
    assertEquals("Wrong manufacturer", MANUFACTURER, desc.manufacturer());
    assertEquals("Wrong HwVersion", HW_VERSION, desc.hwVersion());
    assertEquals("Wrong swVersion", SW_VERSION, desc.swVersion());
    assertEquals("Wrong serial", SERIAL, desc.serialNumber());
    assertEquals("Wrong management Address", MANAGEMENT_ADDRESS,
            desc.annotations().value(AnnotationKeys.MANAGEMENT_ADDRESS));

    // override Device type
    desc = BasicDeviceOperator.combine(OT_BDC, DEV1);
    assertEquals(OTN, desc.type());
}
/**
 * Decodes one raw NetFlow datagram into zero or more messages.
 *
 * <p>The first payload byte selects the path: the v5 passthrough marker, the
 * custom "ordered v9" marker (templates bundled in the same packet), or an
 * unsupported version which is logged and dropped. Returns {@code null} when
 * the payload is too short to contain a version, is unsupported, or fails to
 * parse.
 */
@Nullable
@Override
public Collection<Message> decodeMessages(@Nonnull RawMessage rawMessage) {
    try {
        final ResolvableInetSocketAddress remoteAddress = rawMessage.getRemoteAddress();
        final InetSocketAddress sender = remoteAddress != null ? remoteAddress.getInetSocketAddress() : null;
        final byte[] payload = rawMessage.getPayload();
        // 3 bytes is the minimum needed to even read the NetFlow version field.
        if (payload.length < 3) {
            LOG.debug("NetFlow message (source: {}) doesn't even fit the NetFlow version (size: {} bytes)", sender, payload.length);
            return null;
        }
        final ByteBuf buffer = Unpooled.wrappedBuffer(payload);
        switch (buffer.readByte()) {
            case PASSTHROUGH_MARKER:
                // NetFlow v5: one message per record.
                final NetFlowV5Packet netFlowV5Packet = NetFlowV5Parser.parsePacket(buffer);
                return netFlowV5Packet.records().stream()
                        .map(record -> netFlowFormatter.toMessage(netFlowV5Packet.header(), record, sender))
                        .collect(Collectors.toList());
            case ORDERED_V9_MARKER:
                // our "custom" netflow v9 that has all the templates in the same packet
                return decodeV9(sender, buffer);
            default:
                // Unsupported version: report the input it arrived on (last source node).
                final List<RawMessage.SourceNode> sourceNodes = rawMessage.getSourceNodes();
                final RawMessage.SourceNode sourceNode = sourceNodes.isEmpty() ? null : sourceNodes.get(sourceNodes.size() - 1);
                final String inputId = sourceNode == null ? "<unknown>" : sourceNode.inputId;
                LOG.warn("Unsupported NetFlow packet on input {} (source: {})", inputId, sender);
                return null;
        }
    } catch (FlowException e) {
        LOG.error("Error parsing NetFlow packet <{}> received from <{}>", rawMessage.getId(), rawMessage.getRemoteAddress(), e);
        if (LOG.isDebugEnabled()) {
            LOG.debug("NetFlow packet hexdump:\n{}", ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(rawMessage.getPayload())));
        }
        return null;
    } catch (InvalidProtocolBufferException e) {
        LOG.error("Invalid NetFlowV9 entry found, cannot parse the messages", ExceptionUtils.getRootCause(e));
        return null;
    }
}
// An incomplete NetFlow v9 packet (missing templates) must be handled
// gracefully: decodeMessages swallows the parse failure and returns null.
@Test
public void decodeMessagesThrowsEmptyTemplateExceptionWithIncompleteNetFlowV9() throws Exception {
    final byte[] b = Resources.toByteArray(Resources.getResource("netflow-data/netflow-v9-3_incomplete.dat"));
    final InetSocketAddress source = new InetSocketAddress(InetAddress.getLocalHost(), 12345);
    assertThat(codec.decodeMessages(new RawMessage(b, source))).isNull();
}
/** Returns a new iterator over the elements of this indexed set. */
@Override
public Iterator<T> iterator() {
    return new IndexedSetIterator();
}
// Walks the iterator over the expected 9 pairs (3 int values x 3 long values)
// and checks the sums of both components, then that the iterator is exhausted.
@Test
public void iteratorNext() {
    Iterator<Pair> it = mSet.iterator();
    int intSum = 0;
    int expectedIntSum = 0;
    long longSum = 0;
    long expectedLongSum = 0;
    try {
        long l = 0;
        for (int i = 0; i < 3; i++) {
            for (int k = 0; k < 3; k++) {
                Pair pair = it.next();
                intSum += pair.intValue();
                longSum += pair.longValue();
                expectedIntSum += i;
                expectedLongSum += l++;
            }
        }
    } catch (Exception e) {
        // next() must not throw while elements remain
        fail();
    }
    assertEquals(expectedIntSum, intSum);
    assertEquals(expectedLongSum, longSum);
    assertFalse(it.hasNext());
}
/**
 * Encodes a dynamic array per the ABI spec: the element count, followed by
 * per-element offsets, followed by the encoded element values.
 */
static <T extends Type> String encodeDynamicArray(DynamicArray<T> value) {
    int size = value.getValue().size();
    String encodedLength = encode(new Uint(BigInteger.valueOf(size)));
    // length || offsets || values
    return encodedLength
            + encodeArrayValuesOffsets(value)
            + encodeArrayValues(value);
}
// An empty dynamic array encodes to just its length word: 32 zero bytes.
// Fixed: use the parameterized DynamicArray<Uint> constructor (no raw type,
// no @SuppressWarnings needed) and pass assertEquals arguments in the
// conventional (expected, actual) order so failure messages read correctly.
@Test
public void testEmptyArray() {
    DynamicArray<Uint> array = new DynamicArray<>(Uint.class);
    assertEquals(
            "0000000000000000000000000000000000000000000000000000000000000000",
            TypeEncoder.encodeDynamicArray(array));
}
/**
 * Parses a duration string of the form {@code "<number>[unit]"} (e.g.
 * {@code "10 s"}, {@code "500ms"}) into a {@link Duration}.
 *
 * <p>The numeric prefix is read greedily; the remaining text, lower-cased, is
 * the unit label. A missing label defaults to milliseconds.
 *
 * @throws NullPointerException  if {@code text} is null
 * @throws NumberFormatException if there is no leading number
 * @throws IllegalArgumentException if the input is blank, the unit label is
 *         unknown, or the value overflows a Duration
 */
public static Duration parseDuration(String text) {
    checkNotNull(text);

    final String trimmed = text.trim();
    checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");

    final int len = trimmed.length();
    int pos = 0;

    // Scan the leading run of ASCII digits.
    char current;
    while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
        pos++;
    }

    final String number = trimmed.substring(0, pos);
    // Locale.US keeps the lower-casing independent of the default locale.
    final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);

    if (number.isEmpty()) {
        throw new NumberFormatException("text does not start with a number");
    }

    final BigInteger value;
    try {
        value = new BigInteger(number); // this throws a NumberFormatException
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
                "The value '" + number + "' cannot be represented as an integer number.", e);
    }

    final ChronoUnit unit;
    if (unitLabel.isEmpty()) {
        // No unit given: default to milliseconds.
        unit = ChronoUnit.MILLIS;
    } else {
        unit = LABEL_TO_UNIT_MAP.get(unitLabel);
    }
    if (unit == null) {
        throw new IllegalArgumentException(
                "Time interval unit label '"
                        + unitLabel
                        + "' does not match any of the recognized units: "
                        + TimeUnit.getAllUnits());
    }

    try {
        return convertBigIntToDuration(value, unit);
    } catch (ArithmeticException e) {
        throw new IllegalArgumentException(
                "The value '"
                        + number
                        + "' cannot be represented as java.time.Duration (numeric overflow).",
                e);
    }
}
// All millisecond spellings — bare number, "ms", "milli(s)", "millisecond(s)",
// and with a separating space — must parse to the same millisecond value.
@Test
void testParseDurationMillis() {
    assertThat(TimeUtils.parseDuration("1234").toMillis()).isEqualTo(1234);
    assertThat(TimeUtils.parseDuration("1234ms").toMillis()).isEqualTo(1234);
    assertThat(TimeUtils.parseDuration("1234milli").toMillis()).isEqualTo(1234);
    assertThat(TimeUtils.parseDuration("1234millis").toMillis()).isEqualTo(1234);
    assertThat(TimeUtils.parseDuration("1234millisecond").toMillis()).isEqualTo(1234);
    assertThat(TimeUtils.parseDuration("1234milliseconds").toMillis()).isEqualTo(1234);
    assertThat(TimeUtils.parseDuration("1234 ms").toMillis()).isEqualTo(1234);
}
/**
 * Returns whether the evidence satisfies all of this matcher's criteria:
 * source, confidence, name (case-insensitive) and value.
 */
public boolean matches(Evidence evidence) {
    // Guard-clause form preserving the original short-circuit order.
    if (!sourceMatches(evidence)) {
        return false;
    }
    if (!confidenceMatches(evidence)) {
        return false;
    }
    if (!name.equalsIgnoreCase(evidence.getName())) {
        return false;
    }
    return valueMatches(evidence);
}
// A matcher with wildcard source and wildcard confidence plus the regex
// ".*value.*" must match evidences by value alone, regardless of source or
// confidence — except the specific MEDIUM and LOW fixtures whose values do
// not match the pattern.
@Test
public void testRegExWildcardSourceWildcardConfidenceMatching() throws Exception {
    final EvidenceMatcher regexMediumWildcardSourceMatcher = new EvidenceMatcher(null, "name", ".*value.*", true, null);
    assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_HIGHEST",
            regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_HIGHEST));
    assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_HIGH",
            regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_HIGH));
    assertFalse("regex wildcard source wildcard confidence matcher should not match REGEX_EVIDENCE_MEDIUM",
            regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_MEDIUM));
    assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_MEDIUM_SECOND_SOURCE",
            regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_MEDIUM_SECOND_SOURCE));
    assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_MEDIUM_THIRD_SOURCE",
            regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_MEDIUM_THIRD_SOURCE));
    assertFalse("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_LOW",
            regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_LOW));
}
/** Returns the raw byte value of this data object, or null if none was set. */
@Nullable
public byte[] getValue() {
    return mValue;
}
// Writing -6192 as a big-endian signed 16-bit value must produce 0xE7D0
// (16-bit two's complement of 6192 = 0x1830), high byte first.
@Test
public void setValue_SINT16_BE() {
    final MutableData data = new MutableData(new byte[2]);
    data.setValue(-6192, Data.FORMAT_SINT16_BE, 0);
    assertArrayEquals(new byte[] { (byte) 0xE7, (byte) 0xD0 }, data.getValue());
}
/** Returns the ObjectMapper this codec uses for map (de)serialization. */
public ObjectMapper getObjectMapper() {
    return mapObjectMapper;
}
// The codec works on a copy of the supplied ObjectMapper: the caller's mapper
// keeps its configured features while the codec's internal copy applies the
// codec defaults (FAIL_ON_UNKNOWN_PROPERTIES disabled).
@Test
public void shouldNotOverrideProvidedObjectMapperProperties() {
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true);
    objectMapper.configure(DeserializationFeature.UNWRAP_ROOT_VALUE, false);
    JsonJacksonCodec codec = new JsonJacksonCodec(objectMapper);

    // The caller's mapper is untouched...
    Assertions.assertTrue(objectMapper.getDeserializationConfig().isEnabled(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES));
    // ...while the codec's copy applies its own defaults.
    Assertions.assertFalse(codec.getObjectMapper().getDeserializationConfig().isEnabled(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES));
    Assertions.assertFalse(objectMapper.getDeserializationConfig().isEnabled(DeserializationFeature.UNWRAP_ROOT_VALUE));
    Assertions.assertFalse(codec.getObjectMapper().getDeserializationConfig().isEnabled(DeserializationFeature.UNWRAP_ROOT_VALUE));
}
/** Returns the parsed SELECT statement produced by this analyzer. */
@Override
public Select select() {
    return select;
}
// Analyzes a UNION query and checks the table alias, table name and the
// aliased column are present.
// NOTE(review): assertNotNull(x, "t") treats "t"/"s_test" as the failure
// message, not an expected value — these look like they were intended to be
// assertEquals("t", ...) / assertEquals("s_test", ...). Confirm intent before
// tightening, as assertEquals could expose real mismatches.
@Test
void testUnion() {
    QueryAnalyzerImpl analyzer = new QueryAnalyzerImpl(database,
            "select name n from s_test t " + "union select name n from s_test t");
    assertNotNull(analyzer.select().table.alias, "t");
    assertNotNull(analyzer.select().table.metadata.getName(), "s_test");
    assertNotNull(analyzer.select().getColumns().get("n"));
}
/**
 * Runs the given SQL over registered Pandas DataFrames by delegating to the
 * Python interpreter: wraps the query in {@code pysqldf(...)} and renders the
 * result with {@code z.show(...)}.
 *
 * NOTE(review): the trimmed query is spliced between single quotes without
 * escaping, so a query containing a single quote would break the generated
 * Python statement — confirm whether callers sanitize input.
 */
@Override
public InterpreterResult interpret(String st, InterpreterContext context) throws InterpreterException {
    LOGGER.info("Running SQL query: '{}' over Pandas DataFrame", st);
    return pythonInterpreter.interpret(
            "z.show(pysqldf('" + st.trim() + "'))", context);
}
// Querying a non-existent table must produce an ERROR result whose message
// mentions the missing table; the message location differs between the
// IPython-backed and plain Python paths.
@Test
public void errorMessageIfDependenciesNotInstalled() throws InterpreterException {
    context = getInterpreterContext();
    InterpreterResult ret = pandasSqlInterpreter.interpret("SELECT * from something", context);

    assertNotNull(ret);
    assertEquals(InterpreterResult.Code.ERROR, ret.code(), context.out.toString());
    if (useIPython) {
        assertTrue(context.out.toString().contains("no such table: something"), context.out.toString());
    } else {
        assertTrue(ret.toString().contains("no such table: something"), ret.toString());
    }
}
/**
 * A logger class can be adapted when it is the expected Logback logger type
 * (or a subtype) and the Logback version in use is below 1.3.
 */
@Override
public boolean isAdaptedLogger(Class<?> loggerClass) {
    Class<?> expected = getExpectedLoggerClass();
    boolean isLogbackLogger = expected != null && expected.isAssignableFrom(loggerClass);
    return isLogbackLogger && !isUpperLogback13();
}
// The adapter must accept Logback's Logger class and reject JUL's Logger.
@Test
void testIsAdaptedLogger() {
    assertTrue(logbackNacosLoggingAdapter.isAdaptedLogger(Logger.class));
    assertFalse(logbackNacosLoggingAdapter.isAdaptedLogger(java.util.logging.Logger.class));
}
/**
 * Returns the role names listed under the "roles" JSON array, or an empty
 * set when the field is absent.
 */
public Set<String> roles() {
    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
    if (object.has(ROLES)) {
        for (JsonNode roleNode : (ArrayNode) object.path(ROLES)) {
            builder.add(roleNode.asText());
        }
    }
    return builder.build();
}
// Round-trip: roles written via the setter must be read back identically.
@Test
public void roles() {
    cfg.roles(ROLES);
    print(cfg);
    assertEquals("not roles", ROLES, cfg.roles());
}
/** Static factory for an array type tree with the given element type. */
public static UArrayTypeTree create(UExpression elementType) {
    return new AutoValue_UArrayTypeTree(elementType);
}
// An int[] type tree must survive Java serialization round-trip with equality.
@Test
public void serialization() {
    SerializableTester.reserializeAndAssert(UArrayTypeTree.create(UPrimitiveTypeTree.INT));
}
/**
 * Reconciles the desired PVCs against the cluster and triggers resizing where
 * the desired size differs from the current one.
 *
 * <p>Per PVC: missing/unbound PVCs are (re)created; PVCs already "Resizing"
 * are left alone; PVCs in "FileSystemResizePending" mark their pod for
 * restart; otherwise sizes are compared and a resize or plain reconcile runs.
 *
 * @return a future completing with the set of pod indexes whose pods may need
 *         a restart to finish a pending file-system resize
 */
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
    Set<Integer> podIdsToRestart = new HashSet<>();
    List<Future<Void>> futures = new ArrayList<>(pvcs.size());

    for (PersistentVolumeClaim desiredPvc : pvcs) {
        Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
                .compose(currentPvc -> {
                    if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
                        // This branch handles the following conditions:
                        // * The PVC doesn't exist yet, we should create it
                        // * The PVC is not Bound, we should reconcile it
                        return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                                .map((Void) null);
                    } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
                        // The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
                        LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
                        return Future.succeededFuture();
                    } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
                        // The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
                        podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
                        LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName());
                        return Future.succeededFuture();
                    } else {
                        // The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
                        Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
                        Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));

                        if (!currentSize.equals(desiredSize)) {
                            // The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
                            return resizePvc(kafkaStatus, currentPvc, desiredPvc);
                        } else {
                            // size didn't change, just reconcile
                            return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                                    .map((Void) null);
                        }
                    }
                });
        futures.add(perPvcFuture);
    }

    return Future.all(futures)
            .map(podIdsToRestart);
}
// All three PVCs report "FileSystemResizePending" => every pod index (0,1,2)
// must be flagged for restart and no PVC reconcile call must be issued.
@Test
public void testVolumesWaitingForRestart(VertxTestContext context) {
    List<PersistentVolumeClaim> pvcs = List.of(
            createPvc("data-pod-0"),
            createPvc("data-pod-1"),
            createPvc("data-pod-2")
    );

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Mock the PVC Operator: every PVC is Bound with FileSystemResizePending.
    PvcOperator mockPvcOps = supplier.pvcOperations;
    when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-")))
            .thenAnswer(invocation -> {
                String pvcName = invocation.getArgument(1);
                PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null);
                if (currentPvc != null) {
                    PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc)
                            .withNewStatus()
                                .withPhase("Bound")
                                .withConditions(new PersistentVolumeClaimConditionBuilder()
                                        .withStatus("True")
                                        .withType("FileSystemResizePending")
                                        .build())
                                .withCapacity(Map.of("storage", new Quantity("50Gi", null)))
                            .endStatus()
                            .build();
                    return Future.succeededFuture(pvcWithStatus);
                } else {
                    return Future.succeededFuture();
                }
            });
    ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());

    // Mock the StorageClass Operator
    StorageClassOperator mockSco = supplier.storageClassOperations;
    when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(RESIZABLE_STORAGE_CLASS));

    // Reconcile the PVCs
    PvcReconciler reconciler = new PvcReconciler(
            new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
            mockPvcOps,
            mockSco
    );

    Checkpoint async = context.checkpoint();
    reconciler.resizeAndReconcilePvcs(new KafkaStatus(), pvcs)
            .onComplete(res -> {
                assertThat(res.succeeded(), is(true));

                // All pods are waiting for FS resize, none get a reconcile call.
                assertThat(res.result().size(), is(3));
                assertThat(res.result(), is(Set.of(0, 1, 2)));
                assertThat(pvcCaptor.getAllValues().size(), is(0));

                async.flag();
            });
}
/**
 * Reads the next record, transparently skipping records the parent reader
 * reports as null (e.g. filtered lines) until a record is produced or the
 * end of input is reached.
 */
@Override
public OUT nextRecord(OUT record) throws IOException {
    OUT next = super.nextRecord(record);
    while (next == null && !reachedEnd()) {
        // Skip null (filtered) records and keep reading.
        next = super.nextRecord(record);
    }
    return next;
}
// Each input row has exactly one empty field, cycling through the six tuple
// positions; every numeric parser (Short..Byte) must raise a ParseException
// for its empty field, after which the input is exhausted.
@Test
void testEmptyFields() {
    try {
        final String fileContent =
                "|0|0|0|0|0|\n"
                        + "1||1|1|1|1|\n"
                        + "2|2||2|2|2|\n"
                        + "3|3|3| |3|3|\n"
                        + "4|4|4|4||4|\n"
                        + "5|5|5|5|5||\n";
        final FileInputSplit split = createTempFile(fileContent);

        final TupleTypeInfo<Tuple6<Short, Integer, Long, Float, Double, Byte>> typeInfo =
                TupleTypeInfo.getBasicTupleTypeInfo(
                        Short.class, Integer.class, Long.class, Float.class, Double.class, Byte.class);
        final CsvInputFormat<Tuple6<Short, Integer, Long, Float, Double, Byte>> format =
                new TupleCsvInputFormat<>(PATH, typeInfo);

        format.setFieldDelimiter("|");
        format.configure(new Configuration());
        format.open(split);

        Tuple6<Short, Integer, Long, Float, Double, Byte> result = new Tuple6<>();

        // One ParseException per row, each triggered by a different field parser.
        try {
            result = format.nextRecord(result);
            fail("Empty String Parse Exception was not thrown! (ShortParser)");
        } catch (ParseException e) {
        }
        try {
            result = format.nextRecord(result);
            fail("Empty String Parse Exception was not thrown! (IntegerParser)");
        } catch (ParseException e) {
        }
        try {
            result = format.nextRecord(result);
            fail("Empty String Parse Exception was not thrown! (LongParser)");
        } catch (ParseException e) {
        }
        try {
            result = format.nextRecord(result);
            fail("Empty String Parse Exception was not thrown! (FloatParser)");
        } catch (ParseException e) {
        }
        try {
            result = format.nextRecord(result);
            fail("Empty String Parse Exception was not thrown! (DoubleParser)");
        } catch (ParseException e) {
        }
        try {
            result = format.nextRecord(result);
            fail("Empty String Parse Exception was not thrown! (ByteParser)");
        } catch (ParseException e) {
        }

        result = format.nextRecord(result);
        assertThat(result).isNull();
        assertThat(format.reachedEnd()).isTrue();
    } catch (Exception ex) {
        fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage());
    }
}
/**
 * Instantiates and configures the default value serde from
 * {@code DEFAULT_VALUE_SERDE_CLASS_CONFIG}.
 *
 * @throws ConfigException  if no default value serde is configured
 * @throws StreamsException wrapping any instantiation/configuration failure,
 *         with the configured class named in the message
 */
@SuppressWarnings("WeakerAccess")
public Serde<?> defaultValueSerde() {
    final Object valueSerdeConfigSetting = get(DEFAULT_VALUE_SERDE_CLASS_CONFIG);
    if (valueSerdeConfigSetting == null) {
        throw new ConfigException("Please specify a value serde or set one through StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG");
    }
    try {
        final Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class);
        // Pass all original configs through to the serde (isKey = false).
        serde.configure(originals(), false);
        return serde;
    } catch (final Exception e) {
        throw new StreamsException(
                String.format("Failed to configure value serde %s", valueSerdeConfigSetting), e);
    }
}
// A serde that fails during configure() must surface as a StreamsException
// whose message names the misconfigured serde class.
@Test
public void shouldSpecifyCorrectValueSerdeClassOnError() {
    final Properties props = getStreamsConfig();
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, MisconfiguredSerde.class);
    final StreamsConfig config = new StreamsConfig(props);
    try {
        config.defaultValueSerde();
        fail("Test should throw a StreamsException");
    } catch (final StreamsException e) {
        assertEquals(
                "Failed to configure value serde class org.apache.kafka.streams.StreamsConfigTest$MisconfiguredSerde",
                e.getMessage()
        );
    }
}
/**
 * Deletes topics, dispatching on the concrete collection flavor: topic IDs or
 * topic names. Any other TopicCollection subtype is rejected.
 */
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics, final DeleteTopicsOptions options) {
    if (topics instanceof TopicIdCollection) {
        return DeleteTopicsResult.ofTopicIds(
                handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
    }
    if (topics instanceof TopicNameCollection) {
        return DeleteTopicsResult.ofTopicNames(
                handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
    }
    throw new IllegalArgumentException("The TopicCollection: " + topics
            + " provided did not match any supported classes for deleteTopics.");
}
// deleteTopics by name and by topic ID: success completes normally;
// TOPIC_DELETION_DISABLED and unknown-topic errors surface as the matching
// exception types on the returned future.
@Test
public void testDeleteTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // By topic name: success.
        env.kafkaClient().prepareResponse(
                expectDeleteTopicsRequestWithTopics("myTopic"),
                prepareDeleteTopicsResponse("myTopic", Errors.NONE));
        KafkaFuture<Void> future = env.adminClient().deleteTopics(singletonList("myTopic"),
                new DeleteTopicsOptions()).all();
        assertNull(future.get());

        // By topic name: deletion disabled.
        env.kafkaClient().prepareResponse(
                expectDeleteTopicsRequestWithTopics("myTopic"),
                prepareDeleteTopicsResponse("myTopic", Errors.TOPIC_DELETION_DISABLED));
        future = env.adminClient().deleteTopics(singletonList("myTopic"),
                new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(future, TopicDeletionDisabledException.class);

        // By topic name: unknown topic/partition.
        env.kafkaClient().prepareResponse(
                expectDeleteTopicsRequestWithTopics("myTopic"),
                prepareDeleteTopicsResponse("myTopic", Errors.UNKNOWN_TOPIC_OR_PARTITION));
        future = env.adminClient().deleteTopics(singletonList("myTopic"),
                new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(future, UnknownTopicOrPartitionException.class);

        // With topic IDs
        Uuid topicId = Uuid.randomUuid();

        env.kafkaClient().prepareResponse(
                expectDeleteTopicsRequestWithTopicIds(topicId),
                prepareDeleteTopicsResponseWithTopicId(topicId, Errors.NONE));
        future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)),
                new DeleteTopicsOptions()).all();
        assertNull(future.get());

        env.kafkaClient().prepareResponse(
                expectDeleteTopicsRequestWithTopicIds(topicId),
                prepareDeleteTopicsResponseWithTopicId(topicId, Errors.TOPIC_DELETION_DISABLED));
        future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)),
                new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(future, TopicDeletionDisabledException.class);

        env.kafkaClient().prepareResponse(
                expectDeleteTopicsRequestWithTopicIds(topicId),
                prepareDeleteTopicsResponseWithTopicId(topicId, Errors.UNKNOWN_TOPIC_ID));
        future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)),
                new DeleteTopicsOptions()).all();
        TestUtils.assertFutureError(future, UnknownTopicIdException.class);
    }
}
/**
 * Authorization check for setting the session user.
 *
 * <p>{@code systemAccessControl.get()} is deliberately called inside the
 * lambda so that {@code authenticationCheck} can translate an
 * "access control not yet initialized" failure into the proper error.
 *
 * @throws NullPointerException if {@code principal} or {@code userName} is null
 */
@Override
public void checkCanSetUser(Identity identity, AccessControlContext context, Optional<Principal> principal, String userName) {
    requireNonNull(principal, "principal is null");
    requireNonNull(userName, "userName is null");

    authenticationCheck(() -> systemAccessControl.get().checkCanSetUser(identity, context, principal, userName));
}
// Calling checkCanSetUser before any system access control is loaded must
// fail with "Presto server is still initializing".
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Presto server is still initializing")
public void testInitializing() {
    AccessControlManager accessControlManager = new AccessControlManager(createTestTransactionManager());
    accessControlManager.checkCanSetUser(
            new Identity(USER_NAME, Optional.of(PRINCIPAL)),
            new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(),
                    WarningCollector.NOOP, new RuntimeStats()),
            Optional.empty(),
            "foo");
}
/**
 * Translates a build file (with template parameters applied) into a
 * JibContainerBuilder: base image (or scratch), then creation time, format,
 * environment, labels, volumes, ports, user, working dir, entrypoint, cmd
 * and file-entry layers.
 *
 * @throws InvalidImageReferenceException if the base image reference is invalid
 * @throws IOException on build file read/parse failure
 */
public static JibContainerBuilder toJibContainerBuilder(
        Path projectRoot,
        Path buildFilePath,
        Build buildCommandOptions,
        CommonCliOptions commonCliOptions,
        ConsoleLogger logger)
        throws InvalidImageReferenceException, IOException {
    BuildFileSpec buildFile = toBuildFileSpec(buildFilePath, buildCommandOptions.getTemplateParameters());

    // No "from" in the build file means building from scratch.
    Optional<BaseImageSpec> baseImageSpec = buildFile.getFrom();
    JibContainerBuilder containerBuilder =
            baseImageSpec.isPresent()
                    ? createJibContainerBuilder(baseImageSpec.get(), commonCliOptions, logger)
                    : Jib.fromScratch();

    buildFile.getCreationTime().ifPresent(containerBuilder::setCreationTime);
    buildFile.getFormat().ifPresent(containerBuilder::setFormat);
    containerBuilder.setEnvironment(buildFile.getEnvironment());
    containerBuilder.setLabels(buildFile.getLabels());
    containerBuilder.setVolumes(buildFile.getVolumes());
    containerBuilder.setExposedPorts(buildFile.getExposedPorts());
    buildFile.getUser().ifPresent(containerBuilder::setUser);
    buildFile.getWorkingDirectory().ifPresent(containerBuilder::setWorkingDirectory);
    buildFile.getEntrypoint().ifPresent(containerBuilder::setEntrypoint);
    buildFile.getCmd().ifPresent(containerBuilder::setProgramArguments);

    Optional<LayersSpec> layersSpec = buildFile.getLayers();
    if (layersSpec.isPresent()) {
        containerBuilder.setFileEntriesLayers(Layers.toLayers(projectRoot, layersSpec.get()));
    }
    return containerBuilder;
}
// A build file referencing an undefined template variable must fail with an
// IllegalArgumentException naming the unresolvable variable.
@Test
public void testToBuildFileSpec_failWithMissingTemplateVariable()
        throws URISyntaxException, InvalidImageReferenceException, IOException {
    Path buildfile =
            Paths.get(Resources.getResource("buildfiles/projects/templating/missingVar.yaml").toURI());

    try {
        BuildFiles.toJibContainerBuilder(
                buildfile.getParent(), buildfile, buildCli, commonCliOptions, consoleLogger);
        Assert.fail();
    } catch (IllegalArgumentException iae) {
        MatcherAssert.assertThat(
                iae.getMessage(), CoreMatchers.startsWith("Cannot resolve variable 'missingVar'"));
    }
}
/** Resets every statistics counter and timing accumulator to zero. */
@Override
public void clear() {
    // Event counters
    puts.reset();
    misses.reset();
    removals.reset();
    hits.reset();
    evictions.reset();
    // Timing accumulators
    getTimeNanos.reset();
    putTimeNanos.reset();
    removeTimeNanos.reset();
}
// After recording one of each event/time and calling clear(), every counter
// and every average must read zero.
@Test
public void clear() {
    JCacheStatisticsMXBean stats = new JCacheStatisticsMXBean();
    stats.recordHits(1);
    stats.recordMisses(1);
    stats.recordPuts(1);
    stats.recordRemovals(1);
    stats.recordEvictions(1);
    stats.recordGetTime(1);
    stats.recordPutTime(1);
    stats.recordRemoveTime(1);
    stats.clear();
    assertThat(stats.getCacheHits()).isEqualTo(0L);
    assertThat(stats.getCacheMisses()).isEqualTo(0L);
    assertThat(stats.getCachePuts()).isEqualTo(0L);
    assertThat(stats.getCacheRemovals()).isEqualTo(0L);
    assertThat(stats.getCacheEvictions()).isEqualTo(0L);
    assertThat(stats.getAverageGetTime()).isEqualTo(0F);
    assertThat(stats.getAveragePutTime()).isEqualTo(0F);
    assertThat(stats.getAverageRemoveTime()).isEqualTo(0F);
}
/**
 * Reads a string-family binlog value (ENUM, SET or STRING) from the payload.
 *
 * <p>The column meta packs the real type in the high byte and the length in
 * the low byte; for meta values affected by MySQL bug #37426 the extra length
 * bits are folded back into {@code length} before dispatching.
 *
 * @throws UnsupportedSQLOperationException for any other column type
 */
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    int type = columnDef.getColumnMeta() >> 8;
    int length = columnDef.getColumnMeta() & 0xff;
    // unpack type & length, see https://bugs.mysql.com/bug.php?id=37426.
    if (0x30 != (type & 0x30)) {
        length += ((type & 0x30) ^ 0x30) << 4;
        type |= 0x30;
    }
    switch (MySQLBinaryColumnType.valueOf(type)) {
        case ENUM:
            return readEnumValue(length, payload);
        case SET:
            // SET values of this width are a single byte bitmap.
            return payload.getByteBuf().readByte();
        case STRING:
            return new MySQLBinaryString(payload.readStringFixByBytes(readActualLength(length, payload)));
        default:
            throw new UnsupportedSQLOperationException(MySQLBinaryColumnType.valueOf(type).toString());
    }
}
// An ENUM column with meta length 2 must be read as a two-byte integer value.
@Test
void assertReadEnumValueWithMeta2() {
    columnDef.setColumnMeta((MySQLBinaryColumnType.ENUM.getValue() << 8) + 2);
    when(payload.readInt2()).thenReturn(32767);
    assertThat(new MySQLStringBinlogProtocolValue().read(columnDef, payload), is(32767));
}
/**
 * Validates the OAuth client credentials: the access token must carry the
 * "pullrequest" read scope and must be able to list repositories of the
 * given workspace.
 *
 * @throws IllegalArgumentException when the scope is missing or the
 *         workspace cannot be accessed
 */
public void validate(String clientId, String clientSecret, String workspace) {
    Token token = validateAccessToken(clientId, clientSecret);

    if (token.getScopes() == null || !token.getScopes().contains("pullrequest")) {
        LOG.info(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, token.getScopes()));
        throw new IllegalArgumentException(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION);
    }

    try {
        doGet(token.getAccessToken(), buildUrl("/repositories/" + workspace), r -> null);
    } catch (NotFoundException | IllegalStateException e) {
        // Chain the original exception as the cause instead of discarding it,
        // so the underlying failure is preserved in stack traces.
        throw new IllegalArgumentException(e.getMessage(), e);
    }
}
// A token response with an empty scope string must fail validation with the
// missing-pull-request-permission message, and log the missing scope at INFO.
@Test
public void validate_fails_if_unsufficient_pull_request_privileges() throws Exception {
    String tokenResponse = "{\"scopes\": \"\", \"access_token\": \"token\", \"expires_in\": 7200, "
            + "\"token_type\": \"bearer\", \"state\": \"client_credentials\", \"refresh_token\": \"abc\"}";
    server.enqueue(new MockResponse().setBody(tokenResponse));

    assertThatExceptionOfType(IllegalArgumentException.class)
            .isThrownBy(() -> underTest.validate("clientId", "clientSecret", "workspace"))
            .withMessage(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION);

    assertThat(logTester.logs(Level.INFO)).containsExactly(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, ""));
}
/**
 * Reacts to externally-observed leader information changes for a component,
 * comparing against our confirmed leader information under the service lock
 * so the external record can be corrected if it was overwritten or removed.
 */
@Override
public void onLeaderInformationChange(String componentId, LeaderInformation leaderInformation) {
    synchronized (lock) {
        notifyLeaderInformationChangeInternal(
                componentId,
                leaderInformation,
                confirmedLeaderInformation.forComponentIdOrEmpty(componentId));
    }
}
/**
 * Verifies that externally modified leader information (removed or
 * overwritten by a faulty value) is written back to the expected value by
 * the current leader.
 */
@Test
void testSingleLeaderInformationChangedAndShouldBeCorrected() throws Exception {
    final AtomicReference<LeaderInformationRegister> storedLeaderInformation =
            new AtomicReference<>();
    new Context(storedLeaderInformation) {
        {
            runTestWithSynchronousEventHandling(
                    () -> {
                        final UUID leaderSessionID = UUID.randomUUID();
                        grantLeadership(leaderSessionID);
                        final LeaderInformation expectedLeaderInformation =
                                LeaderInformation.known(
                                        leaderSessionID, contenderContext0.address);

                        // Leader information changed on external storage. It should be
                        // corrected.
                        storedLeaderInformation.set(LeaderInformationRegister.empty());
                        leaderElectionService.onLeaderInformationChange(
                                contenderContext0.componentId, LeaderInformation.empty());
                        assertThat(
                                        storedLeaderInformation
                                                .get()
                                                .forComponentId(contenderContext0.componentId))
                                .as("Removed leader information should have been reset.")
                                .hasValue(expectedLeaderInformation);

                        // A competing value written by another party must likewise be
                        // overwritten with the confirmed leader information.
                        final LeaderInformation faultyLeaderInformation =
                                LeaderInformation.known(UUID.randomUUID(), "faulty-address");
                        storedLeaderInformation.set(
                                LeaderInformationRegister.of(
                                        contenderContext0.componentId, faultyLeaderInformation));
                        leaderElectionService.onLeaderInformationChange(
                                contenderContext0.componentId, faultyLeaderInformation);
                        assertThat(
                                        storedLeaderInformation
                                                .get()
                                                .forComponentId(contenderContext0.componentId))
                                .as("Overwritten leader information should have been reset.")
                                .hasValue(expectedLeaderInformation);
                    });
        }
    };
}
/**
 * Marks the cached redo data for the given subscriber as unregistering so
 * the redo loop stops re-subscribing it. No-op if nothing is cached.
 */
public void subscriberDeregister(String serviceName, String groupName, String cluster) {
    final String groupedServiceName = NamingUtils.getGroupedName(serviceName, groupName);
    final String redoKey = ServiceInfo.getKey(groupedServiceName, cluster);
    synchronized (subscribes) {
        SubscriberRedoData redoData = subscribes.get(redoKey);
        if (redoData != null) {
            redoData.setUnregistering(true);
            redoData.setExpectedRegistered(false);
        }
    }
}
@Test
void testSubscriberDeregister() {
    redoService.cacheSubscriberForRedo(SERVICE, GROUP, CLUSTER);
    redoService.subscriberDeregister(SERVICE, GROUP, CLUSTER);
    // Deregistration must flag the cached redo data as unregistering.
    ConcurrentMap<String, SubscriberRedoData> redoMap = getSubscriberRedoDataMap();
    SubscriberRedoData redoData = redoMap.values().iterator().next();
    assertTrue(redoData.isUnregistering());
}
/**
 * Builds the struct type of the grouping key shared by all given partition
 * specs, projected over the field IDs that are active in every spec.
 */
public static StructType groupingKeyType(Schema schema, Collection<PartitionSpec> specs) {
    return buildPartitionProjectionType("grouping key", specs, commonActiveFieldIds(schema, specs));
}
@Test
public void testGroupingKeyTypeWithEvolvedUnpartitionedSpec() {
    TestTables.TestTable table = TestTables.create(
        tableDir, "test", SCHEMA, PartitionSpec.unpartitioned(), V1_FORMAT_VERSION);
    table.updateSpec().addField(Expressions.bucket("category", 8)).commit();
    assertThat(table.specs()).hasSize(2);
    // No field is active in both specs, so the grouping key collapses to an empty struct.
    StructType groupingKeyType = Partitioning.groupingKeyType(table.schema(), table.specs().values());
    assertThat(groupingKeyType).isEqualTo(StructType.of());
}
public static void error(final Logger logger, final String format, final Supplier<Object> supplier) { if (logger.isErrorEnabled()) { logger.error(format, supplier.get()); } }
@Test
public void testAtLeastOnceError() {
    // With ERROR enabled, the lazy supplier must actually be evaluated.
    when(logger.isErrorEnabled()).thenReturn(true);
    LogUtils.error(logger, supplier);
    verify(supplier, atLeastOnce()).get();
}
@Override public Mono<SearchResult> search(SearchOption option) { // validate the option var errors = validator.validateObject(option); if (errors.hasErrors()) { return Mono.error(new RequestBodyValidationException(errors)); } return extensionGetter.getEnabledExtension(SearchEngine.class) .filter(SearchEngine::available) .switchIfEmpty(Mono.error(SearchEngineUnavailableException::new)) .flatMap(searchEngine -> Mono.fromSupplier(() -> searchEngine.search(option) ).subscribeOn(Schedulers.boundedElastic())); }
@Test
void shouldThrowSearchEngineUnavailableExceptionIfNoSearchEngineFound() {
    var searchOption = new SearchOption();
    searchOption.setKeyword("halo");
    var validationErrors = mock(Errors.class);
    when(validationErrors.hasErrors()).thenReturn(false);
    when(validator.validateObject(searchOption)).thenReturn(validationErrors);
    // No engine is registered at all.
    when(extensionGetter.getEnabledExtension(SearchEngine.class)).thenReturn(Mono.empty());
    StepVerifier.create(searchService.search(searchOption))
        .expectError(SearchEngineUnavailableException.class)
        .verify();
}
/**
 * Always reports this client as ephemeral.
 */
@Override
public boolean isEphemeral() {
    return true;
}
@Test
void testIsEphemeral() {
    // Connection-based clients are always reported as ephemeral.
    assertTrue(connectionBasedClient.isEphemeral());
}
/**
 * Deserializes a plugin capabilities JSON payload and converts the DTO to
 * the domain model.
 */
Capabilities getCapabilitiesFromResponseBody(String responseBody) {
    return capabilitiesConverterV4.fromDTO(
            FORCED_EXPOSE_GSON.fromJson(responseBody, CapabilitiesDTO.class));
}
@Test
public void shouldGetCapabilitiesFromResponseBody() {
    ElasticAgentExtensionConverterV4 converter = new ElasticAgentExtensionConverterV4();
    Capabilities parsed = converter.getCapabilitiesFromResponseBody(
        "{\"supports_status_report\":\"true\",\"supports_agent_status_report\":\"true\"}");
    assertTrue(parsed.supportsPluginStatusReport());
    assertTrue(parsed.supportsAgentStatusReport());
}
/**
 * Reports whether GitLab authentication is enabled, per the configured settings.
 */
@Override
public boolean isEnabled() {
    return gitLabSettings.isEnabled();
}
@Test
public void newScribe_whenGitLabAuthIsDisabled_throws() {
    when(gitLabSettings.isEnabled()).thenReturn(false);
    ScribeGitLabOauth2Api scribeApi = new ScribeGitLabOauth2Api(gitLabSettings);
    // Building a scribe while authentication is off must fail fast.
    assertThatIllegalStateException()
        .isThrownBy(() -> new GitLabIdentityProvider.ScribeFactory()
            .newScribe(gitLabSettings, CALLBACK_URL, scribeApi))
        .withMessage("GitLab authentication is disabled");
}
/**
 * Detaches the UI component registered on the given entry, if any:
 * clears the button's action, removes the component from the menu, and
 * unhooks the action enabler listener that kept the action state in sync.
 */
@Override
public void visit(Entry target) {
    final EntryAccessor entryAccessor = new EntryAccessor();
    // Remove the component attribute from the entry; null if none was registered.
    final Component component = (Component) entryAccessor.removeComponent(target);
    if (component != null) {
        // Detach the action first so the button no longer triggers it.
        if(component instanceof AbstractButton)
            ((AbstractButton)component).setAction(null);
        removeMenuComponent(component);
        // Also unregister the enabler as a property-change listener on the action.
        ActionEnabler actionEnabler = target.removeAttribute(ActionEnabler.class);
        if(actionEnabler != null){
            final AFreeplaneAction action = entryAccessor.getAction(target);
            action.removePropertyChangeListener(actionEnabler);
        }
    }
}
@Test
public void removesComponentsFromParents() throws Exception {
    final Entry entry = new Entry();
    JComponent container = new JPanel();
    JComponent child = new JPanel();
    container.add(child);
    new EntryAccessor().setComponent(entry, child);
    JComponentRemover.INSTANCE.visit(entry);
    // Visiting must detach the registered component from its Swing parent.
    assertThat(child.getParent(), nullValue(Container.class));
}
/**
 * Parses a resource configuration value with no effective upper bound
 * (delegates with {@code Long.MAX_VALUE} as the limit).
 *
 * @throws AllocationConfigurationException if the value cannot be parsed
 */
public static ConfigurableResource parseResourceConfigValue(String value)
    throws AllocationConfigurationException {
    return parseResourceConfigValue(value, Long.MAX_VALUE);
}
@Test
public void testMemoryPercentageCpuAbsoluteCpuNegative() throws Exception {
    // A negative absolute vcore count mixed with percentage memory must be
    // rejected; the helper registers the expected failure before the call.
    expectMissingResource("cpu");
    parseResourceConfigValue("50% memory, -2 vcores");
}
/**
 * Prepares and sends fetch requests to every eligible node.
 * The success and failure callbacks re-synchronize on this Fetcher so that
 * response handling cannot interleave with other synchronized operations.
 *
 * @return the number of fetch requests that were sent
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
@Test
public void testInFlightFetchOnPausedPartition() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // A fetch goes out before the partition is paused.
    assertEquals(1, sendFetches());
    subscriptions.pause(tp0);
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    // Even though the response arrived, records for a paused partition
    // must not be handed to the application.
    assertNull(fetchRecords().get(tp0));
}
/**
 * Applies the GSUB substitution features to the given glyph sequence in the
 * fixed feature order, after the required glyph reordering passes.
 *
 * @param originalGlyphIds input glyph IDs in logical order
 * @return an unmodifiable list of glyph IDs after all substitutions
 */
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds) {
    // Reorder glyphs first so the feature lookups see the expected sequences.
    List<Integer> intermediateGlyphsFromGsub = adjustRephPosition(originalGlyphIds);
    intermediateGlyphsFromGsub = repositionGlyphs(intermediateGlyphsFromGsub);
    for (String feature : FEATURES_IN_ORDER) {
        if (!gsubData.isFeatureSupported(feature)) {
            // Fonts lacking rkrf may still support vatu, which can emulate it.
            if (feature.equals(RKRF_FEATURE) && gsubData.isFeatureSupported(VATU_FEATURE)) {
                // Create your own rkrf feature from vatu feature
                intermediateGlyphsFromGsub = applyRKRFFeature(
                        gsubData.getFeature(VATU_FEATURE),
                        intermediateGlyphsFromGsub);
            }
            LOG.debug("the feature {} was not found", feature);
            continue;
        }
        LOG.debug("applying the feature {}", feature);
        ScriptFeature scriptFeature = gsubData.getFeature(feature);
        intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature, intermediateGlyphsFromGsub);
    }
    return Collections.unmodifiableList(intermediateGlyphsFromGsub);
}
@Test void testApplyTransforms_haln() { // given List<Integer> glyphsAfterGsub = Arrays.asList(539); // when List<Integer> result = gsubWorkerForDevanagari.applyTransforms(getGlyphIds("द्")); // then assertEquals(glyphsAfterGsub, result); }
/**
 * Assembles the full Elasticsearch settings map from the configured
 * properties and logs the resulting HTTP and TCP bind endpoints.
 */
public Map<String, String> build() {
    Map<String, String> settings = new HashMap<>();
    configureFileSystem(settings);
    configureNetwork(settings);
    configureCluster(settings);
    configureSecurity(settings);
    configureOthers(settings);
    LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
        settings.get(ES_HTTP_HOST_KEY), settings.get(ES_HTTP_PORT_KEY),
        settings.get(ES_TRANSPORT_HOST_KEY), settings.get(ES_TRANSPORT_PORT_KEY));
    return settings;
}
@Test
public void test_node_name_default_for_cluster_mode() throws Exception {
    File homeDir = temp.newFolder();
    Props props = new Props(new Properties());
    props.set(CLUSTER_NAME.getKey(), "sonarqube");
    props.set(Property.CLUSTER_ENABLED.getKey(), "true");
    props.set(SEARCH_PORT.getKey(), "1234");
    props.set(SEARCH_HOST.getKey(), "127.0.0.1");
    props.set(PATH_HOME.getKey(), homeDir.getAbsolutePath());
    props.set(PATH_DATA.getKey(), temp.newFolder().getAbsolutePath());
    props.set(PATH_TEMP.getKey(), temp.newFolder().getAbsolutePath());
    props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath());

    Map<String, String> settings = new EsSettings(props, new EsInstallation(props), system).build();

    // In cluster mode the node name defaults to a generated "sonarqube-" name.
    assertThat(settings.get("node.name")).startsWith("sonarqube-");
}
/**
 * Returns a validator for map-typed config values where every entry value
 * must parse as a double. The raw config value must be a string of the
 * form {@code "key:value,key:value"}.
 */
public static Validator mapWithDoubleValue() {
    return (name, val) -> {
        if (!(val instanceof String)) {
            throw new ConfigException(name, val, "Must be a string");
        }
        final Map<String, String> parsed = KsqlConfig.parseStringAsMap(name, (String) val);
        for (final Map.Entry<String, String> entry : parsed.entrySet()) {
            try {
                Double.parseDouble(entry.getValue());
            } catch (NumberFormatException e) {
                throw new ConfigException(name, entry.getValue(), "Not a double");
            }
        }
    };
}
@Test public void shouldParseDoubleValueInMap() { // Given: final Validator validator = ConfigValidators.mapWithDoubleValue(); validator.ensureValid("propName", "foo:1.2,bar:3"); }
/**
 * Creates a new {@link FuryBuilder} for configuring and building a Fury instance.
 */
public static FuryBuilder builder() {
    return new FuryBuilder();
}
@Test
public void testExposeFields() {
    Fury fury = Fury.builder().requireClassRegistration(false).build();
    ImmutableMap<String, Integer> firstMap = ImmutableMap.of("1", 1);
    ImmutableMap<String, Integer> secondMap = ImmutableMap.of("2", 2);
    ExposeFields roundTripped = serDe(fury, new ExposeFields(1, 2, 3, firstMap, secondMap));
    assertEquals(1, roundTripped.f1);
    assertEquals(2, roundTripped.f2);
    // f3 and map2 are not exposed, so they come back as default values.
    assertEquals(0, roundTripped.f3);
    assertEquals(roundTripped.map1, firstMap);
    assertNull(roundTripped.map2);
}
/**
 * Handles a new task assignment: reconciles the requested active/standby
 * tasks against the currently owned ones, recycling tasks whose role
 * changed, closing tasks that are no longer assigned, and creating the
 * rest. Accumulated per-task failures are rethrown at the end so that the
 * whole reconciliation still runs.
 *
 * @param activeTasks  requested active tasks with their input partitions
 * @param standbyTasks requested standby tasks with their input partitions
 */
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
                             final Map<TaskId, Set<TopicPartition>> standbyTasks) {
    log.info("Handle new assignment with:\n" +
                 "\tNew active tasks: {}\n" +
                 "\tNew standby tasks: {}\n" +
                 "\tExisting active tasks: {}\n" +
                 "\tExisting standby tasks: {}",
             activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());

    topologyMetadata.addSubscribedTopicsFromAssignment(
        activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        logPrefix
    );

    final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
    final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
    final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
    final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));

    // Only lock tasks that remain assigned; newly revoked tasks are handled elsewhere.
    final Set<TaskId> tasksToLock =
        tasks.allTaskIds().stream()
            .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
            .collect(Collectors.toSet());
    maybeLockTasks(tasksToLock);

    // first put aside those unrecognized tasks because of unknown named-topologies
    tasks.clearPendingTasksToCreate();
    tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
    tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));

    // first rectify all existing tasks:
    // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
    // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
    // 3. otherwise, close them since they are no longer owned
    final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
    if (stateUpdater == null) {
        handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
    } else {
        handleTasksWithStateUpdater(
            activeTasksToCreate,
            standbyTasksToCreate,
            tasksToRecycle,
            tasksToCloseClean,
            failedTasks
        );
        failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
    }

    final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);

    maybeUnlockTasks(tasksToLock);

    failedTasks.putAll(taskCloseExceptions);
    maybeThrowTaskExceptions(failedTasks);

    createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
/**
 * Verifies that a failure while recycling a standby task into an active one
 * surfaces as a StreamsException, the task is closed dirty, and nothing is
 * handed back to the state updater.
 */
@Test
public void shouldHandleExceptionThrownDuringRecyclingStandbyTask() {
    final StandbyTask standbyTaskToRecycle = standbyTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
    when(stateUpdater.getTasks()).thenReturn(mkSet(standbyTaskToRecycle));
    // Creating the replacement active task fails.
    when(activeTaskCreator.createActiveTaskFromStandby(
        standbyTaskToRecycle,
        standbyTaskToRecycle.inputPartitions(),
        consumer))
        .thenThrow(new RuntimeException());
    // Removal from the state updater completes successfully first.
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
    when(stateUpdater.remove(standbyTaskToRecycle.id())).thenReturn(future);
    future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToRecycle));

    assertThrows(
        StreamsException.class,
        () -> taskManager.handleAssignment(
            mkMap(mkEntry(standbyTaskToRecycle.id(), standbyTaskToRecycle.inputPartitions())),
            Collections.emptyMap()
        )
    );

    // The failed task must not be re-added or queued for init, and must be closed dirty.
    verify(stateUpdater, never()).add(any());
    verify(tasks, never()).addPendingTasksToInit(Collections.singleton(any()));
    verify(standbyTaskToRecycle).closeDirty();
}
/**
 * Returns a snapshot of every registered flag as a name-to-value map.
 */
@Override
public Map<String, String> getAll() {
    return flags.values()
            .stream()
            .collect(Collectors.toMap(f -> f.name, f -> f.value));
}
@Test
void testFeatureFlagResourcesCouldBeRead() throws Exception {
    String configFile = Objects.requireNonNull(this.getClass()
        .getResource("/org/graylog2/featureflag/custom-feature-flag.config")).toURI().getPath();
    FeatureFlags featureFlags = factory.createImmutableFeatureFlags(configFile, metricRegistry);
    // The custom config resource declares "feature1".
    assertThat(featureFlags.getAll().keySet()).contains("feature1");
}
/**
 * Gets a logger instance named after the fully-qualified name of the given class.
 */
public static InternalLogger getInstance(Class<?> clazz) {
    return getInstance(clazz.getName());
}
@Test
public void testIsWarnEnabled() {
    when(mockLogger.isWarnEnabled()).thenReturn(true);
    // The factory-provided wrapper must delegate the level check to the backing logger.
    assertTrue(InternalLoggerFactory.getInstance("mock").isWarnEnabled());
    verify(mockLogger).isWarnEnabled();
}
/**
 * Schedules the task on the wrapped timer.
 */
@Override
public void add(TimerTask timerTask) {
    timer.add(timerTask);
}
@Test
public void testReaper() throws Exception {
    try (Timer timer = new SystemTimerReaper("reaper", new SystemTimer("timer"))) {
        CompletableFuture<Void> first = add(timer, 100L);
        CompletableFuture<Void> second = add(timer, 200L);
        CompletableFuture<Void> third = add(timer, 300L);
        // Each scheduled future must complete exceptionally with a timeout.
        TestUtils.assertFutureThrows(first, TimeoutException.class);
        TestUtils.assertFutureThrows(second, TimeoutException.class);
        TestUtils.assertFutureThrows(third, TimeoutException.class);
    }
}
/**
 * Writes a length-encoded string: a length-encoded integer followed by the
 * string bytes. Null or empty strings are encoded as a single 0x00 byte.
 */
public void writeStringLenenc(final String value) {
    if (value == null || value.isEmpty()) {
        byteBuf.writeByte(0);
        return;
    }
    final byte[] encoded = value.getBytes(charset);
    writeIntLenenc(encoded.length);
    byteBuf.writeBytes(encoded);
}
@Test
void assertWriteStringLenencWithEmpty() {
    MySQLPacketPayload packetPayload = new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
    packetPayload.writeStringLenenc("");
    // An empty string is encoded as a single zero length byte.
    verify(byteBuf).writeByte(0);
}
/**
 * Delegates to the wrapped executor service.
 */
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
    return delegate.invokeAny(tasks);
}
@Test
public void invokeAny_delegates_to_executorService() throws ExecutionException, InterruptedException {
    underTest.invokeAny(CALLABLES);
    // Exactly one delegation call, in order, and nothing else.
    inOrder.verify(executorService).invokeAny(CALLABLES);
    inOrder.verifyNoMoreInteractions();
}