focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Validates that the given OAuth client credentials grant access to Bitbucket Cloud
 * with the "pullrequest" scope, and that the given workspace is reachable.
 *
 * @param clientId     OAuth consumer key
 * @param clientSecret OAuth consumer secret
 * @param workspace    Bitbucket Cloud workspace to probe
 * @throws IllegalArgumentException if the token lacks the pull-request scope or the
 *                                  workspace lookup fails
 */
public void validate(String clientId, String clientSecret, String workspace) {
  Token token = validateAccessToken(clientId, clientSecret);
  // The token must carry the "pullrequest" scope, otherwise decoration cannot work.
  if (token.getScopes() == null || !token.getScopes().contains("pullrequest")) {
    LOG.info(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, token.getScopes()));
    throw new IllegalArgumentException(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION);
  }
  try {
    // Probe the workspace endpoint; the response body is discarded, only reachability matters.
    doGet(token.getAccessToken(), buildUrl("/repositories/" + workspace), r -> null);
  } catch (NotFoundException | IllegalStateException e) {
    // Fix: chain the original exception as the cause instead of dropping it
    // (the message callers see is unchanged).
    throw new IllegalArgumentException(e.getMessage(), e);
  }
}
// Verifies that validate() surfaces a clear error when the OAuth consumer is marked
// "public": Bitbucket rejects client_credentials for such consumers with HTTP 400,
// and the failure must also be logged at INFO with the error code.
@Test
public void validate_with_private_consumer() {
  String response = "{\"error_description\": \"Cannot use client_credentials with a consumer marked as \\\"public\\\". "
      + "Calls for auto generated consumers should use urn:bitbucket:oauth2:jwt instead.\", \"error\": \"invalid_grant\"}";
  server.enqueue(new MockResponse().setBody(response).setResponseCode(400).setHeader("Content-Type", JSON_MEDIA_TYPE));
  assertThatExceptionOfType(IllegalArgumentException.class)
      .isThrownBy(() -> underTest.validate("clientId", "clientSecret", "workspace"))
      .withMessage(UNABLE_TO_CONTACT_BBC_SERVERS + ": " + OAUTH_CONSUMER_NOT_PRIVATE);
  assertThat(logTester.logs(Level.INFO)).containsExactly(String.format(BBC_FAIL_WITH_RESPONSE, serverURL, "400", "invalid_grant"));
}
/**
 * Attempts to unblock the ring buffer after a producer died mid-write, converting the
 * stuck region at the consumer position into a padding record so consumers can advance.
 *
 * @return true if a blocked region was converted to padding, false otherwise
 */
public boolean unblock() {
    final AtomicBuffer buffer = this.buffer;
    final long headPosition = buffer.getLongVolatile(headPositionIndex);
    final long tailPosition = buffer.getLongVolatile(tailPositionIndex);
    // head == tail means the buffer is empty, so nothing can be blocked.
    if (headPosition == tailPosition) {
        return false;
    }
    final int mask = capacity - 1;
    final int consumerIndex = (int)(headPosition & mask);
    final int producerIndex = (int)(tailPosition & mask);
    boolean unblocked = false;
    int length = buffer.getIntVolatile(consumerIndex);
    if (length < 0) {
        // A negative length is a claimed-but-uncommitted record: pad it out.
        buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
        buffer.putIntOrdered(lengthOffset(consumerIndex), -length);
        unblocked = true;
    }
    else if (0 == length) {
        // go from (consumerIndex to producerIndex) or (consumerIndex to capacity)
        final int limit = producerIndex > consumerIndex ? producerIndex : capacity;
        int i = consumerIndex + ALIGNMENT;
        do {
            // read the top int of every long (looking for length aligned to 8=ALIGNMENT)
            length = buffer.getIntVolatile(i);
            if (0 != length) {
                // Re-scan backwards to confirm the gap is still zeroed before padding,
                // guarding against a racing producer having written in the meantime.
                if (scanBackToConfirmStillZeroed(buffer, i, consumerIndex)) {
                    buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
                    buffer.putIntOrdered(lengthOffset(consumerIndex), i - consumerIndex);
                    unblocked = true;
                }
                break;
            }
            i += ALIGNMENT;
        }
        while (i < limit);
    }
    return unblocked;
}
// Verifies that a claimed-but-uncommitted record (negative length at the head position)
// is turned into a padding record of the same size, in the correct write order.
@Test
void shouldUnblockMessageWithHeader() {
    final int messageLength = ALIGNMENT * 4;
    when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn((long)messageLength);
    when(buffer.getLongVolatile(TAIL_COUNTER_INDEX)).thenReturn((long)messageLength * 2);
    when(buffer.getIntVolatile(messageLength)).thenReturn(-messageLength);
    assertTrue(ringBuffer.unblock());
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).putInt(typeOffset(messageLength), PADDING_MSG_TYPE_ID);
    inOrder.verify(buffer).putIntOrdered(lengthOffset(messageLength), messageLength);
}
/**
 * Sets the number of synchronous backups for this map.
 *
 * @param backupCount the number of synchronous backups
 * @return this MapConfig, for fluent chaining
 * @throws IllegalArgumentException if the value is out of range when combined with the
 *         current asyncBackupCount (validated by checkBackupCount)
 */
public MapConfig setBackupCount(final int backupCount) {
    this.backupCount = checkBackupCount(backupCount, asyncBackupCount);
    return this;
}
// Verifies that a backup count just below the allowed minimum is rejected.
@Test(expected = IllegalArgumentException.class)
public void testSetBackupCountLowerLimit() {
    new MapConfig().setBackupCount(MapConfig.MIN_BACKUP_COUNT - 1);
}
// Exposes the span's trace context; returns the stored reference unchanged.
@Override
public TraceContext context() {
    return context;
}
// Verifies the span carries a real (non-zero span id) trace context rather than a noop one.
@Test
void hasRealContext() {
    assertThat(span.context().spanId()).isNotZero();
}
// Boxed-key overload: unboxes the key and delegates to the primitive remove(long).
// NOTE(review): assumes the key is a Long; any other type fails with ClassCastException
// — confirm that matches the Map contract expected by callers.
@Override
public Long remove(final Object key) {
    return remove((long) (Long) key);
}
// Removing an absent key must return the sentinel MISSING_VALUE rather than null.
@Test
public void removeShouldReturnMissing() {
    assertEquals(MISSING_VALUE, map.remove(1L));
}
// Convenience overload: resolves the service context from the execution context and
// delegates to the three-argument create().
public Command create(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext context) {
  return create(statement, context.getServiceContext(), context);
}
// Terminating a query backed by a source table must be rejected with a descriptive
// error, and the query itself must not be closed.
@Test
public void shouldFailTerminateSourceTableQuery() {
  // Given:
  configuredStatement = configuredStatement("TERMINATE X", terminateQuery);
  when(terminateQuery.getQueryId()).thenReturn(Optional.of(QUERY_ID));
  when(executionContext.getPersistentQuery(QUERY_ID)).thenReturn(Optional.of(query1));
  when(query1.getPersistentQueryType())
      .thenReturn(KsqlConstants.PersistentQueryType.CREATE_SOURCE);
  // When:
  final Exception e = assertThrows(
      KsqlStatementException.class,
      () -> commandFactory.create(configuredStatement, executionContext)
  );
  // Then:
  assertThat(e.getMessage(), containsString(
      "Cannot terminate query 'FOO' because it is linked to a source table"));
  verify(query1, times(0)).close();
}
// Reports how many more tasks the backing queue can accept before submissions are rejected.
@Override
public int getRemainingQueueCapacity() {
    return taskQ.remainingCapacity();
}
// With one long-running task occupying the single worker thread, a queued no-op task
// must reduce the remaining queue capacity by exactly one.
@Test
public void getRemainingQueueCapacity_whenTaskSubmitted() {
    int queueSize = 10;
    ManagedExecutorService executorService = newManagedExecutorService(1, queueSize);
    CountDownLatch finishLatch = startLongRunningTask(executorService);
    try {
        executeNopTask(executorService);
        assertEquals(queueSize - 1, executorService.getRemainingQueueCapacity());
    } finally {
        // Always release the blocked worker so the executor can shut down.
        finishLatch.countDown();
    }
}
/**
 * Builds the environment-variable value listing trusted certificates as
 * semicolon-separated "secretName/certificateOrPattern" entries.
 *
 * @param trustedCertificates certificate sources to render; may be null or empty
 * @return the joined entries, or null when no certificates are configured
 * @throws InvalidResourceException if a source has neither a certificate nor a pattern
 */
public static String trustedCertsEnvVar(List<CertSecretSource> trustedCertificates) {
    // Nothing configured -> the env var should not be set at all.
    if (trustedCertificates == null || trustedCertificates.isEmpty()) {
        return null;
    }
    List<String> entries = new ArrayList<>();
    for (CertSecretSource source : trustedCertificates) {
        String fileName;
        if (source.getCertificate() != null) {
            fileName = source.getCertificate();
        } else if (source.getPattern() != null) {
            fileName = source.getPattern();
        } else {
            throw new InvalidResourceException("Certificate source does not contain the certificate or the pattern.");
        }
        entries.add(source.getSecretName() + "/" + fileName);
    }
    return String.join(";", entries);
}
// Verifies that explicit certificates and wildcard patterns from multiple secrets are
// rendered in input order as semicolon-separated "secret/file" entries.
@Test
public void testTrustedCertsEnvVar() {
    CertSecretSource cert1 = new CertSecretSourceBuilder()
            .withSecretName("first-certificate")
            .withCertificate("ca.crt")
            .build();
    CertSecretSource cert2 = new CertSecretSourceBuilder()
            .withSecretName("second-certificate")
            .withCertificate("tls.crt")
            .build();
    CertSecretSource cert3 = new CertSecretSourceBuilder()
            .withSecretName("first-certificate")
            .withCertificate("ca2.crt")
            .build();
    CertSecretSource cert4 = new CertSecretSourceBuilder()
            .withSecretName("third-certificate")
            .withCertificate("*.crt")
            .build();
    CertSecretSource cert5 = new CertSecretSourceBuilder()
            .withSecretName("first-certificate")
            .withCertificate("*.pem")
            .build();
    assertThat(CertUtils.trustedCertsEnvVar(List.of(cert1, cert2, cert3, cert4, cert5)),
            is("first-certificate/ca.crt;second-certificate/tls.crt;first-certificate/ca2.crt;third-certificate/*.crt;first-certificate/*.pem"));
}
/**
 * Writes the "request and graph detail" PDF report for the given request id into the
 * underlying document.
 *
 * @throws IOException wrapping any DocumentException from the PDF library
 */
public void writeRequestAndGraphDetail(Collector collector, CollectorServer collectorServer,
        Range range, String requestId) throws IOException {
    try {
        document.open();
        new PdfRequestAndGraphDetailReport(collector, collectorServer, range, requestId,
                pdfDocumentFactory, document).toPdf();
    } catch (final DocumentException e) {
        throw createIOException(e);
    }
    // NOTE(review): on failure the document is left open (close() below is skipped) —
    // confirm whether callers rely on that, or whether a finally-close would be safer.
    document.close();
}
// Exercises the report for three request-id flavours: a normal HTTP request, a system
// error entry, and a JRobin graph name — each must produce non-empty PDF output.
@Test
public void testWriteRequestAndGraphDetail() throws IOException {
    final Counter sqlCounter = new Counter("sql", "db.png");
    final Counter httpCounter = new Counter("http", "db.png", sqlCounter);
    final Counter errorCounter = new Counter("error", "db.png");
    final List<Counter> counters = new ArrayList<>();
    counters.add(httpCounter);
    counters.add(sqlCounter);
    counters.add(errorCounter);
    final Collector collector = new Collector("test", counters);
    final JavaInformations javaInformations = new JavaInformations(null, true);
    // Record a nested http/sql request plus a system error so the report has content.
    httpCounter.bindContext("test 1", "complete test 1", null, -1, -1);
    sqlCounter.bindContext("sql1", "sql 1", null, -1, -1);
    sqlCounter.addRequest("sql1", 100, 100, 100, false, -1);
    httpCounter.addRequest("test 1", 0, 0, 0, false, 1000);
    errorCounter.addRequestForSystemError("test error", 0, 0, 0, " a stack-trace");
    collector.collectWithoutErrors(List.of(javaInformations));
    final String requestId = httpCounter.getRequests().get(0).getId();
    final String requestId2 = errorCounter.getRequests().get(0).getId();
    final ByteArrayOutputStream output = new ByteArrayOutputStream();
    final PdfOtherReport pdfOtherReport = new PdfOtherReport(TEST_APP, output);
    pdfOtherReport.writeRequestAndGraphDetail(collector, null, Period.TOUT.getRange(), requestId);
    assertNotEmptyAndClear(output);
    final PdfOtherReport pdfOtherReport2 = new PdfOtherReport(TEST_APP, output);
    pdfOtherReport2.writeRequestAndGraphDetail(collector, null, Period.TOUT.getRange(), requestId2);
    assertNotEmptyAndClear(output);
    JRobin.initBackendFactory(new Timer(getClass().getSimpleName(), true));
    final String graphName = "usedMemory";
    final PdfOtherReport pdfOtherReport3 = new PdfOtherReport(TEST_APP, output);
    pdfOtherReport3.writeRequestAndGraphDetail(collector, null, Period.TOUT.getRange(), graphName);
    assertNotEmptyAndClear(output);
}
/**
 * Fetches session-window rows for the given key on the given partition, restricted to
 * windows whose start/end fall within the supplied bounds.
 *
 * @return an iterator-based query result over the matching windowed rows
 * @throws MaterializationException wrapping any failure from the state store
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStart,
    final Range<Instant> windowEnd,
    final Optional<Position> position
) {
  try {
    final ReadOnlySessionStore<GenericKey, GenericRow> store = stateStore
        .store(QueryableStoreTypes.sessionStore(), partition);
    return KsMaterializedQueryResult.rowIterator(
        findSession(store, key, windowStart, windowEnd).iterator());
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
// The underlying fetch iterator must be closed after the lookup to avoid leaking
// state-store resources.
@Test
public void shouldCloseIterator() {
  // When:
  table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS);
  // Then:
  verify(fetchIterator).close();
}
// Entry point of the rewriter: dispatches the node through the visitor with the given context.
public AstNode rewrite(final AstNode node, final C context) {
  return rewriter.process(node, context);
}
// Rewriting a SingleColumn must rewrite its expression while preserving the source
// location and the column alias.
@Test
public void shouldRewriteSingleColumn() {
  // Given:
  final SingleColumn singleColumn = new SingleColumn(
      location,
      expression,
      Optional.of(ColumnName.of("foo"))
  );
  when(expressionRewriter.apply(expression, context)).thenReturn(rewrittenExpression);
  // When:
  final AstNode rewritten = rewriter.rewrite(singleColumn, context);
  // Then:
  assertThat(rewritten, equalTo(new SingleColumn(
      location,
      rewrittenExpression,
      Optional.of(ColumnName.of("foo")))
  ));
}
/**
 * Blocks until the command queue has consumed past the sequence number carried by the
 * request, if any. A request without a sequence number is a no-op.
 *
 * @param commandQueue the queue to wait on
 * @param request      the request possibly carrying a sequence number
 * @param timeout      maximum time to wait
 * @throws InterruptedException if interrupted while waiting
 * @throws TimeoutException     if the sequence number is not reached in time
 */
public static void waitForCommandSequenceNumber(
    final CommandQueue commandQueue,
    final KsqlRequest request,
    final Duration timeout
) throws InterruptedException, TimeoutException {
  final Optional<Long> sequenceNumber = request.getCommandSequenceNumber();
  if (!sequenceNumber.isPresent()) {
    // Nothing to wait for.
    return;
  }
  commandQueue.ensureConsumedPast(sequenceNumber.get(), timeout);
}
// When the request carries a sequence number, the queue must be asked to consume past it.
@Test
public void shouldWaitIfSequenceNumberSpecified() throws Exception {
  // Given:
  when(request.getCommandSequenceNumber()).thenReturn(Optional.of(SEQUENCE_NUMBER));
  // When:
  CommandStoreUtil.waitForCommandSequenceNumber(commandQueue, request, TIMEOUT);
  // Then:
  verify(commandQueue).ensureConsumedPast(SEQUENCE_NUMBER, TIMEOUT);
}
// Creates a raw reader for the topic on the compaction subscription, then runs the
// compaction asynchronously on the scheduler; the reader is closed by the continuation.
public CompletableFuture<Long> compact(String topic) {
    return RawReader.create(pulsar, topic, COMPACTION_SUBSCRIPTION, false).thenComposeAsync(
            this::compactAndCloseReader, scheduler);
}
// Compacting a freshly created, empty topic must complete without error.
@Test
public void testCompactEmptyTopic() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";
    // trigger creation of topic on server side
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").subscribe().close();
    compact(topic);
}
/**
 * Parses a NetFlow v5 packet (fixed header plus {@code count} fixed-length records)
 * from the buffer without consuming it.
 *
 * @param bb buffer positioned at the start of the packet
 * @return the parsed packet; the third argument to create() is the total bytes parsed
 * @throws CorruptFlowPacketException if the header count is non-positive or fewer bytes
 *         are readable than the declared packet length
 */
public static NetFlowV5Packet parsePacket(ByteBuf bb) {
    final int readableBytes = bb.readableBytes();
    final NetFlowV5Header header = parseHeader(bb.slice(bb.readerIndex(), HEADER_LENGTH));
    // Expected total size: fixed header plus `count` fixed-length records.
    final int packetLength = HEADER_LENGTH + header.count() * RECORD_LENGTH;
    if (header.count() <= 0 || readableBytes < packetLength) {
        throw new CorruptFlowPacketException("Insufficient data (expected: " + packetLength
                + " bytes, actual: " + readableBytes + " bytes)");
    }
    final ImmutableList.Builder<NetFlowV5Record> records = ImmutableList.builder();
    int offset = HEADER_LENGTH;
    for (int i = 0; i < header.count(); i++) {
        // Slice each record relative to the reader index; the buffer itself is not consumed.
        records.add(parseRecord(bb.slice(offset + bb.readerIndex(), RECORD_LENGTH)));
        offset += RECORD_LENGTH;
    }
    return NetFlowV5Packet.create(header, records.build(), offset);
}
// Parses every NetFlow v5 UDP payload from a recorded pcap capture and checks the total
// record count across all packets.
@Test
public void pcap_netgraph_NetFlowV5() throws Exception {
    final List<NetFlowV5Record> allRecords = new ArrayList<>();
    try (InputStream inputStream = Resources.getResource("netflow-data/netgraph-netflow5.pcap").openStream()) {
        final Pcap pcap = Pcap.openStream(inputStream);
        pcap.loop(packet -> {
                    if (packet.hasProtocol(Protocol.UDP)) {
                        final UDPPacket udp = (UDPPacket) packet.getPacket(Protocol.UDP);
                        final ByteBuf byteBuf = Unpooled.wrappedBuffer(udp.getPayload().getArray());
                        final NetFlowV5Packet netFlowV5Packet = NetFlowV5Parser.parsePacket(byteBuf);
                        assertThat(netFlowV5Packet).isNotNull();
                        allRecords.addAll(netFlowV5Packet.records());
                    }
                    return true;
                }
        );
    }
    assertThat(allRecords).hasSize(120);
}
// Derives key serde features from the schema: a single-column key is the case where
// unwrapping may apply; delegates to the (format, isSingleKey) overload.
public static SerdeFeatures buildKeyFeatures(
    final LogicalSchema schema,
    final Format keyFormat
) {
  return buildKeyFeatures(keyFormat, schema.key().size() == 1);
}
// A format that only supports unwrapped keys must not get an explicit wrapping feature.
@Test
public void shouldNotSetUnwrappedKeysIfKeyFormatsSupportsOnlyUnwrapping() {
  // When:
  final SerdeFeatures result = SerdeFeaturesFactory.buildKeyFeatures(
      SINGLE_FIELD_SCHEMA,
      KAFKA
  );
  // Then:
  assertThat(result.findAny(SerdeFeatures.WRAPPING_FEATURES), is(Optional.empty()));
}
/**
 * Maps an encoded int flag to its boolean value.
 *
 * @param value the encoded flag; must equal TRUE or FALSE
 * @return true for TRUE, false for FALSE
 * @throws RuntimeException for any other value
 */
public static boolean valueToBoolean(int value) {
    if (value == TRUE) {
        return true;
    }
    if (value == FALSE) {
        return false;
    }
    throw new RuntimeException("Boolean value error, must be 0 or 1");
}
// Any value outside the TRUE/FALSE encoding must be rejected.
@Test
public void shouldThrowIfValueIsNotZeroOrOne() {
    assertThrows(RuntimeException.class, () -> BooleanUtils.valueToBoolean(123));
}
/**
 * Creates a TUS upload descriptor for the backup CDN, authorized for writing the given key.
 *
 * @param key the object key; must be non-blank
 * @return a descriptor carrying the CDN id, key, auth headers, and signed upload location
 * @throws IllegalArgumentException if the key is blank
 */
public BackupUploadDescriptor generateUpload(final String key) {
  if (key.isBlank()) {
    throw new IllegalArgumentException("Upload descriptors must have non-empty keys");
  }
  // Credentials are scoped to a write entity derived from the key.
  final ExternalServiceCredentials credentials =
      credentialsGenerator.generateFor(WRITE_ENTITY_PREFIX + key);
  // TUS conveys the file name base64-encoded in the Upload-Metadata header.
  final String encodedKey = Base64.getEncoder().encodeToString(key.getBytes(StandardCharsets.UTF_8));
  final Map<String, String> headers = Map.of(
      HttpHeaders.AUTHORIZATION, HeaderUtils.basicAuthHeader(credentials),
      "Upload-Metadata", String.format("filename %s", encodedKey));
  return new BackupUploadDescriptor(BACKUP_CDN, key, headers, tusUri + "/" + CDN_PATH);
}
// Verifies the generated descriptor: upload location under the configured TUS base URL,
// the original key, and basic-auth credentials whose username encodes the write entity.
@Test
public void uploadGenerator() {
    Cdn3BackupCredentialGenerator generator = new Cdn3BackupCredentialGenerator(new TusConfiguration(
            new SecretBytes(TestRandomUtil.nextBytes(32)),
            "https://example.org/upload"));
    final BackupUploadDescriptor messageBackupUploadDescriptor = generator.generateUpload("subdir/key");
    assertThat(messageBackupUploadDescriptor.signedUploadLocation()).isEqualTo("https://example.org/upload/backups");
    assertThat(messageBackupUploadDescriptor.key()).isEqualTo("subdir/key");
    assertThat(messageBackupUploadDescriptor.headers()).containsKey("Authorization");
    final String username = parseUsername(messageBackupUploadDescriptor.headers().get("Authorization"));
    assertThat(username).isEqualTo("write$backups/subdir/key");
}
/**
 * Resolves a CatalogLoader from catalog properties: a custom catalog-impl wins (and is
 * mutually exclusive with catalog-type); otherwise the type defaults to hive.
 *
 * @throws IllegalArgumentException if both catalog-impl and catalog-type are set
 * @throws UnsupportedOperationException for an unknown catalog-type
 */
static CatalogLoader createCatalogLoader(
    String name, Map<String, String> properties, Configuration hadoopConf) {
  String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
  if (catalogImpl != null) {
    String catalogType = properties.get(ICEBERG_CATALOG_TYPE);
    Preconditions.checkArgument(
        catalogType == null,
        "Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s",
        name,
        catalogType,
        catalogImpl);
    return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
  }
  String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
  switch (catalogType.toLowerCase(Locale.ENGLISH)) {
    case ICEBERG_CATALOG_TYPE_HIVE:
      // The values of properties 'uri', 'warehouse', 'hive-conf-dir' are allowed to be
      // null; in that case they fall back to the hadoop configuration loaded from the
      // classpath.
      String hiveConfDir = properties.get(HIVE_CONF_DIR);
      String hadoopConfDir = properties.get(HADOOP_CONF_DIR);
      Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir);
      return CatalogLoader.hive(name, newHadoopConf, properties);
    case ICEBERG_CATALOG_TYPE_HADOOP:
      return CatalogLoader.hadoop(name, hadoopConf, properties);
    case ICEBERG_CATALOG_TYPE_REST:
      return CatalogLoader.rest(name, hadoopConf, properties);
    default:
      throw new UnsupportedOperationException(
          "Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')");
  }
}
// Setting both catalog-impl and catalog-type must be rejected with a clear message.
@Test
public void testCreateCatalogCustomWithHiveCatalogTypeSet() {
  String catalogName = "customCatalog";
  props.put(CatalogProperties.CATALOG_IMPL, CustomHadoopCatalog.class.getName());
  props.put(
      FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HIVE);
  assertThatThrownBy(
          () -> FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration()))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessageStartingWith(
          "Cannot create catalog customCatalog, both catalog-type and catalog-impl are set");
}
/**
 * Retry aspect entry point for RxJava2 return types: proceeds with the advised method
 * and decorates its reactive return value with the retry transformer.
 *
 * @throws Throwable anything thrown by the advised method
 */
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Retry retry, String methodName)
        throws Throwable {
    RetryTransformer<?> retryTransformer = RetryTransformer.of(retry);
    Object returnValue = proceedingJoinPoint.proceed();
    return executeRxJava2Aspect(retryTransformer, returnValue);
}
// Both Single and Flowable return values must come back decorated (non-null) from the aspect.
@Test
public void testReactorTypes() throws Throwable {
    Retry retry = Retry.ofDefaults("test");
    when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
    assertThat(rxJava2RetryAspectExt.handle(proceedingJoinPoint, retry, "testMethod"))
        .isNotNull();
    when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
    assertThat(rxJava2RetryAspectExt.handle(proceedingJoinPoint, retry, "testMethod"))
        .isNotNull();
}
/**
 * Runs a REST callable and converts its result (or any failure) into an HTTP Response.
 * Authentication setup failures and callable failures both yield error responses rather
 * than propagating exceptions.
 */
public static <T> Response call(RestUtils.RestCallable<T> callable, AlluxioConfiguration alluxioConf,
    @Nullable Map<String, Object> headers) {
  try {
    // TODO(cc): reconsider how to enable authentication
    if (SecurityUtils.isSecurityEnabled(alluxioConf)
        && AuthenticatedClientUser.get(alluxioConf) == null) {
      AuthenticatedClientUser.set(ServerUserState.global().getUser().getName());
    }
  } catch (IOException e) {
    LOG.warn("Failed to set AuthenticatedClientUser in REST service handler: {}", e.toString());
    return createErrorResponse(e, alluxioConf);
  }
  try {
    return createResponse(callable.call(), alluxioConf, headers);
  } catch (Exception e) {
    LOG.warn("Unexpected error invoking rest endpoint: {}", e.toString());
    return createErrorResponse(e, alluxioConf);
  }
}
// A callable returning a plain object must yield a 200 response whose entity is that
// same object, unmodified.
@Test
public void objectOkResponse() throws Exception {
  class Obj {
    private final int mStatus;
    private final String mMessage;

    Obj(int status, String message) {
      mStatus = status;
      mMessage = message;
    }

    public int getStatus() {
      return mStatus;
    }

    public String getMessage() {
      return mMessage;
    }
  }
  int status = 200;
  String message = "OK";
  final Obj object = new Obj(status, message);
  Response response = RestUtils.call(new RestUtils.RestCallable<Obj>() {
    @Override
    public Obj call() throws Exception {
      return object;
    }
  }, Configuration.global());
  Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
  Obj obj = (Obj) response.getEntity();
  Assert.assertEquals(status, obj.getStatus());
  Assert.assertEquals(message, obj.getMessage());
}
/**
 * Creates a builder for a windowed key-value store backed by the given supplier,
 * using the system clock.
 *
 * @throws NullPointerException if supplier is null
 */
public static <K, V> StoreBuilder<WindowStore<K, V>> windowStoreBuilder(final WindowBytesStoreSupplier supplier,
                                                                        final Serde<K> keySerde,
                                                                        final Serde<V> valueSerde) {
    Objects.requireNonNull(supplier, "supplier cannot be null");
    return new WindowStoreBuilder<>(supplier, keySerde, valueSerde, Time.SYSTEM);
}
// A null supplier must be rejected with the documented NullPointerException message.
@Test
public void shouldThrowIfSupplierIsNullForWindowStoreBuilder() {
    final Exception e = assertThrows(NullPointerException.class,
        () -> Stores.windowStoreBuilder(null, Serdes.ByteArray(), Serdes.ByteArray()));
    assertEquals("supplier cannot be null", e.getMessage());
}
// FUSE rename hook: delegates to renameInternal, with AlluxioFuseUtils.call providing
// logging/metrics under the FUSE_RENAME operation name.
@Override
public int rename(String oldPath, String newPath, int flags) {
  return AlluxioFuseUtils.call(LOG, () -> renameInternal(oldPath, newPath, flags),
      FuseConstants.FUSE_RENAME, "oldPath=%s,newPath=%s,", oldPath, newPath);
}
// Renaming a non-existent source must map FileDoesNotExistException to -ENOENT.
@Test
public void renameOldNotExist() throws Exception {
  AlluxioURI oldPath = BASE_EXPECTED_URI.join("/old");
  AlluxioURI newPath = BASE_EXPECTED_URI.join("/new");
  doThrow(new FileDoesNotExistException("File /old does not exist"))
      .when(mFileSystem).rename(oldPath, newPath);
  when(mFileSystem.getStatus(any(AlluxioURI.class)))
      .thenThrow(new FileDoesNotExistException("File /old does not exist"));
  assertEquals(-ErrorCodes.ENOENT(),
      mFuseFs.rename("/old", "/new", AlluxioJniRenameUtils.NO_FLAGS));
}
/**
 * Converts a YAML rule configuration into repository tuples. Configurations without the
 * RepositoryTupleEntity annotation yield nothing; leaf entities are marshalled whole;
 * otherwise each field is swapped individually using the rule's node path.
 */
public Collection<RepositoryTuple> swapToRepositoryTuples(final YamlRuleConfiguration yamlRuleConfig) {
    RepositoryTupleEntity tupleEntity = yamlRuleConfig.getClass().getAnnotation(RepositoryTupleEntity.class);
    if (null == tupleEntity) {
        return Collections.emptyList();
    }
    if (tupleEntity.leaf()) {
        // A leaf entity serializes as a single tuple keyed by the annotation value.
        return Collections.singleton(new RepositoryTuple(tupleEntity.value(), YamlEngine.marshal(yamlRuleConfig)));
    }
    Collection<RepositoryTuple> result = new LinkedList<>();
    RuleNodePath ruleNodePath = TypedSPILoader.getService(RuleNodePathProvider.class, yamlRuleConfig.getRuleConfigurationType()).getRuleNodePath();
    for (Field each : getFields(yamlRuleConfig.getClass())) {
        // Temporarily open the field for reflective access, restoring the previous state after.
        boolean isAccessible = each.isAccessible();
        each.setAccessible(true);
        result.addAll(swapToRepositoryTuples(yamlRuleConfig, ruleNodePath, each));
        each.setAccessible(isAccessible);
    }
    return result;
}
// A configuration class without the RepositoryTupleEntity annotation must produce no tuples.
@Test
void assertSwapToRepositoryTuplesWithoutRepositoryTupleEntityAnnotation() {
    assertTrue(new RepositoryTupleSwapperEngine().swapToRepositoryTuples(new NoneYamlRuleConfiguration()).isEmpty());
}
/**
 * Logs a string event to the ring buffer: claims space, encodes the value, and commits.
 * Silently drops the event when the claim fails (non-positive index).
 *
 * @param code  event code identifying the event type
 * @param value string payload to record
 */
public void logString(final DriverEventCode code, final String value) {
    // Payload is the string plus a length prefix (SIZE_OF_INT).
    final int length = value.length() + SIZE_OF_INT;
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(toEventCodeId(code), encodedLength);
    if (index > 0) {
        try {
            encode((UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, value);
        }
        finally {
            // Always commit the claimed slot, even if encoding throws.
            ringBuffer.commit(index);
        }
    }
}
// Verifies the encoded record: correct header fields and the ASCII string payload at
// the expected offset past the log header.
@Test
void logString() {
    final int recordOffset = align(100, ALIGNMENT);
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, recordOffset);
    final DriverEventCode eventCode = CMD_IN_ADD_PUBLICATION;
    final String value = "abc";
    final int captureLength = value.length() + SIZE_OF_INT;
    logger.logString(eventCode, value);
    verifyLogHeader(logBuffer, recordOffset, toEventCodeId(eventCode), captureLength, captureLength);
    assertEquals(value,
        logBuffer.getStringAscii(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH), LITTLE_ENDIAN));
}
/**
 * Decodes a serialized message by delegating to the generated static decode method.
 *
 * @param payload     raw message bytes
 * @param destination row to populate
 * @return the populated row (as returned by the generated decoder)
 * @throws RuntimeException wrapping any reflective invocation failure
 */
@Nullable
@Override
public GenericRow decode(byte[] payload, GenericRow destination) {
  try {
    // Static method, so the receiver is null; return its result directly.
    return (GenericRow) _decodeMethod.invoke(null, payload, destination);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
// Data-driven check: for each (field, proto value, pinot value) triple, decoding a
// message with that field set must populate the row with the expected Pinot value.
@Test(dataProvider = "normalCases")
public void whenNormalCases(String fieldName, Object protoVal, Object pinotVal)
    throws Exception {
  Descriptors.FieldDescriptor fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(fieldName);
  ComplexTypes.TestMessage.Builder messageBuilder = ComplexTypes.TestMessage.newBuilder();
  messageBuilder.setField(fd, protoVal);
  GenericRow row = new GenericRow();
  ProtoBufCodeGenMessageDecoder messageDecoder = setupDecoder("complex_types.jar",
      "org.apache.pinot.plugin.inputformat.protobuf.ComplexTypes$TestMessage",
      getAllSourceFieldsForComplexType());
  messageDecoder.decode(messageBuilder.build().toByteArray(), row);
  Assert.assertEquals(row.getValue(fd.getName()), pinotVal);
}
// Blocks until the member is an active part of the group, retrying (with a warning log)
// indefinitely; each attempt uses an effectively unbounded timer.
public void ensureActiveGroup() {
    while (!ensureActiveGroup(time.timer(Long.MAX_VALUE))) {
        log.warn("still waiting to ensure active group");
    }
}
// A FENCED_INSTANCE_ID error on sync-group must surface as FencedInstanceIdException
// rather than being retried.
@Test
public void testSyncGroupRequestWithFencedInstanceIdException() {
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    final int generation = -1;
    mockClient.prepareResponse(joinGroupFollowerResponse(generation, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.FENCED_INSTANCE_ID));
    assertThrows(FencedInstanceIdException.class, () -> coordinator.ensureActiveGroup());
}
/**
 * Processes an order by validating the product and then taking payment.
 *
 * @return a success message when both steps pass, a failure message otherwise
 */
public String processOrder() {
    // Guard style: bail out as soon as either step fails. Short-circuiting keeps
    // payment from running when validation fails, exactly as before.
    if (!validateProduct() || !processPayment()) {
        return "Order processing failed";
    }
    return "Order processed successfully";
}
// When product validation returns false, the order must fail without reaching payment.
@Test
void testProcessOrder_FailureWithProductValidationFailure() {
    // Arrange
    when(restTemplate.postForEntity(eq("http://localhost:30302/product/validate"), anyString(), eq(Boolean.class)))
            .thenReturn(ResponseEntity.ok(false));
    // Act
    String result = orderService.processOrder();
    // Assert
    assertEquals("Order processing failed", result);
}
// Synchronous facade: blocks on the async initialization of the bloom filter.
@Override
public boolean tryInit(long expectedInsertions, double falseProbability) {
    return get(tryInitAsync(expectedInsertions, falseProbability));
}
// Renaming an initialized bloom filter must move it: the new name exists, the old does not.
@Test
public void testEmptyRename() {
    RBloomFilter<String> bloomFilter = redisson.getBloomFilter("test");
    bloomFilter.tryInit(1000, 0.01);
    bloomFilter.rename("test1");
    assertThat(bloomFilter.isExists()).isTrue();
    assertThat(redisson.getBloomFilter("test").isExists()).isFalse();
}
/**
 * Commit callback: evicts successfully committed offsets from the cache, or logs the
 * commit failure without touching the cache.
 *
 * @param committed offsets reported by the commit attempt
 * @param exception non-null when the commit failed
 */
public void removeCommittedEntries(Map<TopicPartition, OffsetAndMetadata> committed, Exception exception) {
    if (exception != null) {
        // Keep cached entries so they can be retried on the next commit.
        LOG.error("Failed to commit offset: {}", exception.getMessage(), exception);
        return;
    }
    committed.forEach(this::removeCommittedEntry);
}
// Committing one partition's offset must evict exactly that entry, leaving the others intact.
@Order(4)
@Test
@DisplayName("Tests whether the cache removes committed offsets")
void removeCommittedEntries() {
    final TopicPartition topic11 = new TopicPartition("topic1", 1);
    final TopicPartition topic12 = new TopicPartition("topic1", 2);
    final TopicPartition topic13 = new TopicPartition("topic1", 3);
    final Map<TopicPartition, OffsetAndMetadata> offsets =
            Collections.singletonMap(topic12, new OffsetAndMetadata(3));
    offsetCache.removeCommittedEntries(offsets, null);
    assertEquals(2, offsetCache.getOffset(topic11));
    assertNull(offsetCache.getOffset(topic12));
    assertEquals(5, offsetCache.getOffset(topic13));
    assertEquals(2, offsetCache.cacheSize());
}
/**
 * Sorts the issue list according to the query's sort field and direction.
 *
 * @return the sorted list, or the original list when either the sort field or the
 *         direction is missing
 */
public List<IssueDto> sort() {
  final String sortField = query.sort();
  final Boolean ascending = query.asc();
  if (sortField == null || ascending == null) {
    // Incomplete sort specification -> keep the original order.
    return issues;
  }
  return getIssueProcessor(sortField).sort(issues, ascending);
}
// Ascending severity sort must order INFO < MAJOR < BLOCKER.
@Test
public void should_sort_by_severity() {
  IssueDto issue1 = new IssueDto().setKee("A").setSeverity("INFO");
  IssueDto issue2 = new IssueDto().setKee("B").setSeverity("BLOCKER");
  IssueDto issue3 = new IssueDto().setKee("C").setSeverity("MAJOR");
  List<IssueDto> dtoList = newArrayList(issue1, issue2, issue3);
  IssueQuery query = IssueQuery.builder().sort(IssueQuery.SORT_BY_SEVERITY).asc(true).build();
  IssuesFinderSort issuesFinderSort = new IssuesFinderSort(dtoList, query);
  List<IssueDto> result = newArrayList(issuesFinderSort.sort());
  assertThat(result).hasSize(3);
  assertThat(result.get(0).getSeverity()).isEqualTo("INFO");
  assertThat(result.get(1).getSeverity()).isEqualTo("MAJOR");
  assertThat(result.get(2).getSeverity()).isEqualTo("BLOCKER");
}
/**
 * Returns whether auto-tracking should ignore this view.
 *
 * @param view the view to check; null is treated as ignored
 * @return true when the view is null, its type is in the ignored-type list, the ignore
 *         tag is set to "1", or any check throws
 */
public static boolean isViewIgnored(View view) {
    try {
        // Basic validation.
        if (view == null) {
            return true;
        }
        // Ignored by view type.
        List<Class<?>> mIgnoredViewTypeList = SensorsDataAPI.sharedInstance().getIgnoredViewTypeList();
        if (mIgnoredViewTypeList != null) {
            for (Class<?> clazz : mIgnoredViewTypeList) {
                if (clazz.isAssignableFrom(view.getClass())) {
                    return true;
                }
            }
        }
        // Ignored via the per-view tag.
        return "1".equals(view.getTag(R.id.sensors_analytics_tag_view_ignored));
    } catch (Exception e) {
        SALog.printStackTrace(e);
        // Fail closed: treat errors as "ignored" so tracking never crashes the app.
        return true;
    }
}
// A view explicitly registered via ignoreView() must be reported as ignored.
@Test
public void testIsViewIgnored() {
    SensorsDataAPI sensorsDataAPI = SAHelper.initSensors(mApplication);
    TextView textView1 = new TextView(mApplication);
    textView1.setText("child1");
    sensorsDataAPI.ignoreView(textView1);
    Assert.assertTrue(SAViewUtils.isViewIgnored(textView1));
}
/**
 * Creates a Nextcloud download share for the given file via the OCS sharing API.
 * Shares with the world as a public link, or with a specific sharee as a user share;
 * an optional passphrase from the callback protects the share.
 *
 * @throws BackgroundException mapped from HTTP or I/O failures
 */
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Object options,
        final PasswordCallback callback) throws BackgroundException {
    final Host bookmark = session.getHost();
    // Build the OCS share-creation URL; the path is relative to the user's files home.
    final StringBuilder request = new StringBuilder(String.format(
        "https://%s%s/apps/files_sharing/api/v1/shares?path=%s&shareType=%d&shareWith=%s",
        bookmark.getHostname(),
        new NextcloudHomeFeature(bookmark).find(NextcloudHomeFeature.Context.ocs).getAbsolute(),
        URIEncoder.encode(PathRelativizer.relativize(
            NextcloudHomeFeature.Context.files.home(bookmark).find().getAbsolute(),
            file.getAbsolute())),
        Sharee.world.equals(sharee) ? SHARE_TYPE_PUBLIC_LINK : SHARE_TYPE_USER,
        Sharee.world.equals(sharee) ? StringUtils.EMPTY : sharee.getIdentifier()
    ));
    // Prompt for an optional passphrase to protect the share.
    final Credentials password = callback.prompt(bookmark,
        LocaleFactory.localizedString("Passphrase", "Cryptomator"),
        MessageFormat.format(LocaleFactory.localizedString(
            "Create a passphrase required to access {0}", "Credentials"), file.getName()),
        new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk()));
    if(password.isPasswordAuthentication()) {
        request.append(String.format("&password=%s", URIEncoder.encode(password.getPassword())));
    }
    final HttpPost resource = new HttpPost(request.toString());
    // OCS endpoints require this header; responses are requested as XML.
    resource.setHeader("OCS-APIRequest", "true");
    resource.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
    try {
        return session.getClient().execute(resource, new OcsDownloadShareResponseHandler());
    }
    catch(HttpResponseException e) {
        throw new DefaultHttpResponseExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
// Sharing the root folder must be rejected by the server with an AccessDeniedException
// carrying the server-provided detail message.
@Test
public void testToDownloadShareRoot() throws Exception {
    final Path home = new NextcloudHomeFeature(session.getHost()).find();
    try {
        new NextcloudShareFeature(session).toDownloadUrl(home, Share.Sharee.world, null, new DisabledPasswordCallback() {
            @Override
            public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
                return new Credentials(null, new AlphanumericRandomStringService(10).random());
            }
        });
        fail();
    }
    catch(AccessDeniedException e) {
        assertEquals("You cannot share your root folder. Please contact your web hosting service provider for assistance.", e.getDetail());
    }
}
/**
 * Persists a new notice and returns its generated id.
 *
 * @param createReqVO the creation request payload
 * @return the database-assigned notice id
 */
@Override
public Long createNotice(NoticeSaveReqVO createReqVO) {
    // Map the request VO onto the persistent entity, insert, then read back the generated id.
    final NoticeDO noticeDO = BeanUtils.toBean(createReqVO, NoticeDO.class);
    noticeMapper.insert(noticeDO);
    return noticeDO.getId();
}
// Creating a notice must return a non-null id and persist all request fields (except id).
@Test
public void testCreateNotice_success() {
    // Prepare arguments (id left null so the database assigns it).
    NoticeSaveReqVO reqVO = randomPojo(NoticeSaveReqVO.class)
            .setId(null);
    // Invoke.
    Long noticeId = noticeService.createNotice(reqVO);
    // Verify the inserted record matches the request.
    assertNotNull(noticeId);
    NoticeDO notice = noticeMapper.selectById(noticeId);
    assertPojoEquals(reqVO, notice, "id");
}
// This strategy always reports at the end of a reporting period.
@Override
public boolean onReportingPeriodEnd() {
    return true;
}
// Pins the strategy's contract: onReportingPeriodEnd() is unconditionally true.
@Test
public void testOnReportingPeriodEnd() {
    assertTrue(strategy.onReportingPeriodEnd(), "onReportingPeriodEnd() should always return true.");
}
/**
 * JSR-303 digits rule: when enabled and the schema declares both integerDigits and
 * fractionalDigits on an applicable field type, annotates the field with @Digits
 * (jakarta or javax flavour per configuration).
 *
 * @return the (possibly annotated) field, unchanged otherwise
 */
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
    if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()
            && node.has("integerDigits") && node.has("fractionalDigits")
            && isApplicableType(field)) {
        // Choose the annotation package based on the jakarta/javax configuration switch.
        final Class<? extends Annotation> digitsClass =
                ruleFactory.getGenerationConfig().isUseJakartaValidation()
                        ? Digits.class
                        : javax.validation.constraints.Digits.class;
        JAnnotationUse annotation = field.annotate(digitsClass);
        annotation.param("integer", node.get("integerDigits").asInt());
        annotation.param("fraction", node.get("fractionalDigits").asInt());
    }
    return field;
}
// With JSR-303 enabled and both digit bounds present, the field must be annotated with
// @Digits carrying the exact integer/fraction values — but only for applicable types.
@Test
public void testHasIntegerAndFractionalDigits() {
    when(config.isIncludeJsr303Annotations()).thenReturn(true);
    final int intValue = new Random().nextInt();
    final int fractionalValue = new Random().nextInt();
    when(subNodeInteger.asInt()).thenReturn(intValue);
    when(subNodeFractional.asInt()).thenReturn(fractionalValue);
    when(node.get("integerDigits")).thenReturn(subNodeInteger);
    when(node.get("fractionalDigits")).thenReturn(subNodeFractional);
    when(fieldVar.annotate(digitsClass)).thenReturn(annotation);
    when(node.has("integerDigits")).thenReturn(true);
    when(node.has("fractionalDigits")).thenReturn(true);
    when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName());
    JFieldVar result = rule.apply("node", node, null, fieldVar, null);
    assertSame(fieldVar, result);
    verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(digitsClass);
    verify(annotation, times(isApplicable ? 1 : 0)).param("integer", intValue);
    verify(annotation, times(isApplicable ? 1 : 0)).param("fraction", fractionalValue);
}
public static <T> Point<T> interpolate(Point<T> p1, Point<T> p2, Instant targetTime) { checkNotNull(p1, "Cannot perform interpolation when the first input points is null"); checkNotNull(p2, "Cannot perform interpolation when the second input points is null"); checkNotNull(targetTime, "Cannot perform interpolation when the targetTime is null"); checkArgument( p1.time().isBefore(p2.time()) || p1.time().equals(p2.time()), "The input points must be in chronological order" ); TimeWindow window = TimeWindow.of(p1.time(), p2.time()); checkArgument( window.contains(targetTime), "The targetTime is outside the required time window" ); if (p1.time().equals(targetTime)) { return (new PointBuilder<T>(p1)).build(); } else if (p2.time().equals(targetTime)) { return (new PointBuilder<T>(p2)).build(); } else { double fraction = window.toFractionOfRange(targetTime); //build an interpolated point LatLong interpolatedLatLong = interpolateLatLong(p1.latLong(), p2.latLong(), fraction); Double interpolatedCourseInDegrees = interpolateCourse( isNull(p1.course()) ? null : p1.course().inDegrees(), isNull(p2.course()) ? null : p2.course().inDegrees(), fraction ); //correct the interpolated course when one of the input values was null if (interpolatedCourseInDegrees == null) { interpolatedCourseInDegrees = Spherical.courseInDegrees(p1.latLong(), p2.latLong()); } double interpolatedSpeed = interpolateSpeed(p1, p2, fraction); Distance interpolatedAltitude = interpolate( p1.altitude(), p2.altitude(), fraction ); //return a copy of the 1st input point but with corrected trajectory data return (new PointBuilder<T>(p1)) .latLong(interpolatedLatLong) .course(Course.ofDegrees(interpolatedCourseInDegrees)) .speed(Speed.ofKnots(interpolatedSpeed)) .altitude(interpolatedAltitude) .time(targetTime) .build(); } }
@Test public void testInterpolatePoint3() { /* * Test the interpolation works properly at the "end" of the timewindow */ Point<String> p1 = (new PointBuilder<String>()) .time(Instant.EPOCH) .altitude(Distance.ofFeet(1000.0)) .courseInDegrees(120.0) .latLong(new LatLong(0.0, 10.0)) .speedInKnots(200.0) .build(); Point<String> p2 = (new PointBuilder<String>()) .time(Instant.EPOCH.plusSeconds(8)) .altitude(Distance.ofFeet(500.0)) .courseInDegrees(130.0) .latLong(new LatLong(5.0, 15.0)) .speedInKnots(300.0) .build(); Point<String> testPoint = interpolate(p1, p2, Instant.EPOCH.plusSeconds(8)); double TOLERANCE = 0.0001; assertEquals( Instant.EPOCH.plusSeconds(8), testPoint.time() ); assertEquals( 500.0, testPoint.altitude().inFeet(), TOLERANCE ); assertEquals( 130.0, testPoint.course().inDegrees(), TOLERANCE ); assertEquals(LatLong.of(5.0, 15.0), testPoint.latLong()); assertEquals( 300.0, testPoint.speed().inKnots(), TOLERANCE ); }
@Override
public String toString() {
    // Field order mirrors the class layout: url, id (quoted), resourceType.
    StringBuilder sb = new StringBuilder("ResourceConfig{");
    sb.append("url=").append(url);
    sb.append(", id='").append(id).append('\'');
    sb.append(", resourceType=").append(resourceType);
    return sb.append('}').toString();
}
@Test public void when_addNonexistentZipOfJarsWithFile_then_throwsException() { // Given String path = Paths.get("/i/do/not/exist").toString(); File file = new File(path); // Then expectedException.expect(JetException.class); expectedException.expectMessage("Not an existing, readable file: " + path); // When config.addJarsInZip(file); }
/**
 * Computes the exclusive end (epoch millis) of the calendar interval containing
 * {@code startTs}, evaluated in the supplied time zone.
 *
 * @param startTs      timestamp (epoch millis) anywhere inside the interval
 * @param intervalType WEEK (Sunday start), WEEK_ISO (Monday start), MONTH or QUARTER
 * @param tzId         time zone in which calendar boundaries are resolved
 * @return epoch millis of the first instant after the interval
 * @throws IllegalArgumentException if the interval type is not supported
 */
public static long calculateIntervalEnd(long startTs, IntervalType intervalType, ZoneId tzId) {
    var startTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTs), tzId);
    switch (intervalType) {
        case WEEK:
            // Midnight of the current Sunday-started week, plus one week.
            return startTime.truncatedTo(ChronoUnit.DAYS).with(WeekFields.SUNDAY_START.dayOfWeek(), 1).plusDays(7).toInstant().toEpochMilli();
        case WEEK_ISO:
            // Midnight of the current ISO (Monday-started) week, plus one week.
            return startTime.truncatedTo(ChronoUnit.DAYS).with(WeekFields.ISO.dayOfWeek(), 1).plusDays(7).toInstant().toEpochMilli();
        case MONTH:
            // Midnight of the first day of the current month, plus one month.
            return startTime.truncatedTo(ChronoUnit.DAYS).withDayOfMonth(1).plusMonths(1).toInstant().toEpochMilli();
        case QUARTER:
            // Midnight of the first day of the current quarter, plus three months.
            return startTime.truncatedTo(ChronoUnit.DAYS).with(IsoFields.DAY_OF_QUARTER, 1).plusMonths(3).toInstant().toEpochMilli();
        default:
            // IllegalArgumentException is a RuntimeException, so existing catch sites still work;
            // including the value makes the failure diagnosable.
            throw new IllegalArgumentException("Interval type not supported: " + intervalType);
    }
}
@Test void testMonthEnd() { long ts = 1704899727000L; // Wednesday, January 10 15:15:27 GMT assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.MONTH, ZoneId.of("Europe/Kyiv"))).isEqualTo(1706738400000L); // Thursday, February 1, 2024 0:00:00 GMT+02:00 assertThat(TimeUtils.calculateIntervalEnd(ts, IntervalType.MONTH, ZoneId.of("Europe/Amsterdam"))).isEqualTo(1706742000000L); // Monday, January 15, 2024 0:00:00 GMT+02:00 }
/**
 * Converts a JVM type descriptor (e.g. {@code "[I"}, {@code "[Ljava/lang/Object;"})
 * into a Java source name (e.g. {@code "int[]"}, {@code "java.lang.Object[]"}).
 *
 * @param desc JVM descriptor, possibly with leading {@code '['} array markers
 * @return the Java source-level type name with one {@code []} per array dimension
 * @throws RuntimeException if a single-character descriptor is not a known primitive code
 */
public static String desc2name(String desc) {
    StringBuilder sb = new StringBuilder();
    // c = index just past the last '[' => number of array dimensions.
    int c = desc.lastIndexOf('[') + 1;
    if (desc.length() == c + 1) {
        // Exactly one character remains: a primitive (or void) type code.
        switch (desc.charAt(c)) {
            case JVM_VOID: {
                sb.append("void");
                break;
            }
            case JVM_BOOLEAN: {
                sb.append("boolean");
                break;
            }
            case JVM_BYTE: {
                sb.append("byte");
                break;
            }
            case JVM_CHAR: {
                sb.append("char");
                break;
            }
            case JVM_DOUBLE: {
                sb.append("double");
                break;
            }
            case JVM_FLOAT: {
                sb.append("float");
                break;
            }
            case JVM_INT: {
                sb.append("int");
                break;
            }
            case JVM_LONG: {
                sb.append("long");
                break;
            }
            case JVM_SHORT: {
                sb.append("short");
                break;
            }
            default:
                // Was a bare RuntimeException with no message, which made failures undiagnosable.
                throw new RuntimeException("Unknown primitive type descriptor: " + desc);
        }
    } else {
        // Object type: strip the leading 'L' and trailing ';', convert '/' to '.'.
        sb.append(desc.substring(c + 1, desc.length() - 1).replace('/', '.'));
    }
    // Append one "[]" per array dimension.
    while (c-- > 0) {
        sb.append("[]");
    }
    return sb.toString();
}
@Test void testDesc2name() { // desc2name assertEquals("short[]", ReflectUtils.desc2name(ReflectUtils.getDesc(short[].class))); assertEquals("boolean[]", ReflectUtils.desc2name(ReflectUtils.getDesc(boolean[].class))); assertEquals("byte[]", ReflectUtils.desc2name(ReflectUtils.getDesc(byte[].class))); assertEquals("char[]", ReflectUtils.desc2name(ReflectUtils.getDesc(char[].class))); assertEquals("double[]", ReflectUtils.desc2name(ReflectUtils.getDesc(double[].class))); assertEquals("float[]", ReflectUtils.desc2name(ReflectUtils.getDesc(float[].class))); assertEquals("int[]", ReflectUtils.desc2name(ReflectUtils.getDesc(int[].class))); assertEquals("long[]", ReflectUtils.desc2name(ReflectUtils.getDesc(long[].class))); assertEquals("int", ReflectUtils.desc2name(ReflectUtils.getDesc(int.class))); assertEquals("void", ReflectUtils.desc2name(ReflectUtils.getDesc(void.class))); assertEquals("java.lang.Object[][]", ReflectUtils.desc2name(ReflectUtils.getDesc(Object[][].class))); }
/**
 * Resolves a {@link FormatInfo} to its {@code Format} implementation and validates
 * the supplied properties against that format.
 */
public static Format of(final FormatInfo formatInfo) {
    // NOTE(review): toUpperCase() is default-locale sensitive (e.g. Turkish 'i');
    // presumably format names are ASCII-only — confirm before changing.
    final String upperCaseName = formatInfo.getFormat().toUpperCase();
    final Format format = fromName(upperCaseName);
    // The format itself rejects configs it does not support.
    format.validateProperties(formatInfo.getProperties());
    return format;
}
@Test public void shouldThrowWhenAttemptingToUseValueDelimiterWithJsonFormat() { // Given: final FormatInfo format = FormatInfo.of("JSON", ImmutableMap.of("delimiter", "x")); // When: final Exception e = assertThrows( KsqlException.class, () -> FormatFactory.of(format) ); // Then: assertThat(e.getMessage(), containsString("JSON does not support the following configs: [delimiter]")); }
@Override public String authenticateRequest(final HttpServletRequest request) { final String smUser = request.getHeader(SITE_MINDER_HEADER.getValue()); if (smUser == null || smUser.trim().isEmpty()) { // SiteMinder has not authenticated the user return null; } else { return smUser; } }
@Test public void willAuthenticateAUserWithDifferentHeader() { final String userId = "a-test-user"; final String header = "a-custom-header"; SiteMinderServletRequestAuthenticator.SITE_MINDER_HEADER.setValue(header); doReturn(userId).when(request).getHeader(header); final String authenticatedUser = authenticator.authenticateRequest(request); assertThat(authenticatedUser, is(userId)); }
public static Map<String, String> parseToMap(String attributesModification) { if (Strings.isNullOrEmpty(attributesModification)) { return new HashMap<>(); } // format: +key1=value1,+key2=value2,-key3,+key4=value4 Map<String, String> attributes = new HashMap<>(); String[] kvs = attributesModification.split(ATTR_ARRAY_SEPARATOR_COMMA); for (String kv : kvs) { String key; String value; if (kv.contains(ATTR_KEY_VALUE_EQUAL_SIGN)) { String[] splits = kv.split(ATTR_KEY_VALUE_EQUAL_SIGN); key = splits[0]; value = splits[1]; if (!key.contains(ATTR_ADD_PLUS_SIGN)) { throw new RuntimeException("add/alter attribute format is wrong: " + key); } } else { key = kv; value = ""; if (!key.contains(ATTR_DELETE_MINUS_SIGN)) { throw new RuntimeException("delete attribute format is wrong: " + key); } } String old = attributes.put(key, value); if (old != null) { throw new RuntimeException("key duplication: " + key); } } return attributes; }
@Test(expected = RuntimeException.class) public void parseToMap_DuplicateKey_ThrowsRuntimeException() { String attributesModification = "+key1=value1,+key1=value2"; AttributeParser.parseToMap(attributesModification); }
@VisibleForTesting static File checkHadoopHomeInner(String home) throws FileNotFoundException { // couldn't find either setting for hadoop's home directory if (home == null) { throw new FileNotFoundException(E_HADOOP_PROPS_UNSET); } // strip off leading and trailing double quotes while (home.startsWith("\"")) { home = home.substring(1); } while (home.endsWith("\"")) { home = home.substring(0, home.length() - 1); } // after stripping any quotes, check for home dir being non-empty if (home.isEmpty()) { throw new FileNotFoundException(E_HADOOP_PROPS_EMPTY); } // check that the hadoop home dir value // is an absolute reference to a directory File homedir = new File(home); if (!homedir.isAbsolute()) { throw new FileNotFoundException("Hadoop home directory " + homedir + " " + E_IS_RELATIVE); } if (!homedir.exists()) { throw new FileNotFoundException("Hadoop home directory " + homedir + " " + E_DOES_NOT_EXIST); } if (!homedir.isDirectory()) { throw new FileNotFoundException("Hadoop home directory " + homedir + " "+ E_NOT_DIRECTORY); } return homedir; }
@Test public void testHadoopHomeValidQuoted() throws Throwable { File f = checkHadoopHomeInner('"'+ rootTestDir.getCanonicalPath() + '"'); assertEquals(rootTestDir, f); }
/**
 * Maps a list of accepted compression-type ids (bytes) to {@link CompressionType}s,
 * silently dropping (with a warning) any id that is not a known compression type.
 *
 * @param acceptedCompressionTypes ids from the accept list; may be null or empty
 * @return the recognized compression types, in input order; never null
 */
public static List<CompressionType> getCompressionTypesFromAcceptedList(List<Byte> acceptedCompressionTypes) {
    if (acceptedCompressionTypes == null || acceptedCompressionTypes.isEmpty()) {
        return Collections.emptyList();
    }
    List<CompressionType> supported = new ArrayList<>(acceptedCompressionTypes.size());
    for (Byte id : acceptedCompressionTypes) {
        int compressionId = id.intValue();
        try {
            supported.add(CompressionType.forId(compressionId));
        } catch (IllegalArgumentException e) {
            // Unknown ids are tolerated: log and continue with the rest of the list.
            log.warn("Accepted compressionByte type with ID {} is not a known compressionByte type; ignoring", compressionId, e);
        }
    }
    return supported;
}
@Test
public void testGetCompressionTypesFromAcceptedList() {
    // Null and empty accept lists both yield an empty (non-null) result.
    assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(null).size());
    assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(Collections.emptyList()).size());
    List<Byte> compressionTypes = new ArrayList<>();
    compressionTypes.add(CompressionType.GZIP.id);
    compressionTypes.add(CompressionType.LZ4.id);
    compressionTypes.add(CompressionType.SNAPPY.id);
    compressionTypes.add(CompressionType.ZSTD.id);
    compressionTypes.add(CompressionType.NONE.id);
    compressionTypes.add((byte) -1); // unknown id: expected to be logged and skipped
    // All 5 known ids are kept; the invalid id (-1) is ignored.
    assertEquals(5, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(compressionTypes).size());
}
public void deletePartitionMetadataTable() { List<String> ddl = new ArrayList<>(); if (this.isPostgres()) { ddl.add("DROP INDEX \"" + CREATED_AT_START_TIMESTAMP_INDEX + "\""); ddl.add("DROP INDEX \"" + WATERMARK_INDEX + "\""); ddl.add("DROP TABLE \"" + tableName + "\""); } else { ddl.add("DROP INDEX " + CREATED_AT_START_TIMESTAMP_INDEX); ddl.add("DROP INDEX " + WATERMARK_INDEX); ddl.add("DROP TABLE " + tableName); } OperationFuture<Void, UpdateDatabaseDdlMetadata> op = databaseAdminClient.updateDatabaseDdl(instanceId, databaseId, ddl, null); try { // Initiate the request which returns an OperationFuture. op.get(TIMEOUT_MINUTES, TimeUnit.MINUTES); } catch (ExecutionException | TimeoutException e) { // If the operation failed or timed out during execution, expose the cause. if (e.getCause() != null) { throw (SpannerException) e.getCause(); } else { throw SpannerExceptionFactory.asSpannerException(e); } } catch (InterruptedException e) { // Throw when a thread is waiting, sleeping, or otherwise occupied, // and the thread is interrupted, either before or during the activity. throw SpannerExceptionFactory.propagateInterrupt(e); } }
@Test public void testDeletePartitionMetadataTableWithTimeoutException() throws Exception { when(op.get(10, TimeUnit.MINUTES)).thenThrow(new TimeoutException(TIMED_OUT)); try { partitionMetadataAdminDao.deletePartitionMetadataTable(); fail(); } catch (SpannerException e) { assertTrue(e.getMessage().contains(TIMED_OUT)); } }
@Override public int getOrder() { return PluginEnum.RPC_PARAM_TRANSFORM.getCode(); }
@Test public void testGetOrder() { int result = rpcParamTransformPlugin.getOrder(); Assertions.assertEquals(PluginEnum.RPC_PARAM_TRANSFORM.getCode(), result); }
/**
 * Gzip-decompresses the given bytes into a new byte array.
 *
 * @param bytes gzip-compressed input; must not be null
 * @return the decompressed bytes
 * @throws NullPointerException if {@code bytes} is null
 * @throws RuntimeException wrapping any {@link IOException} (e.g. malformed gzip data)
 */
public static byte[] decompress(byte[] bytes) {
    if (bytes == null) {
        throw new NullPointerException("bytes is null");
    }
    ByteArrayOutputStream result = new ByteArrayOutputStream();
    try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(bytes))) {
        // Chunk size only affects I/O granularity, never the decompressed output.
        byte[] chunk = new byte[4096];
        for (int read = in.read(chunk); read != -1; read = in.read(chunk)) {
            result.write(chunk, 0, read);
        }
        return result.toByteArray();
    } catch (IOException e) {
        throw new RuntimeException("gzip decompress error", e);
    }
}
@Test public void test_decompress() { Assertions.assertThrows(NullPointerException.class, () -> { GzipUtil.decompress(null); }); Assertions.assertThrows(RuntimeException.class, () -> { GzipUtil.decompress(new byte[0]); }); Assertions.assertThrows(RuntimeException.class, () -> { byte[] bytes = {0x1, 0x2}; GzipUtil.decompress(bytes); }); }
public String build( final String cellValue ) { switch ( type ) { case FORALL: return buildForAll( cellValue ); case INDEXED: return buildMulti( cellValue ); default: return buildSingle( cellValue ); } }
@Test public void testForAllAndCSVMultiple() { final String snippet = "forall(&&){something == $ || something == $}"; final SnippetBuilder snip = new SnippetBuilder(snippet); final String result = snip.build("x, y"); assertThat(result).isEqualTo("something == x || something == x && something == y || something == y"); }
/**
 * Encodes a byte array as a lower-case hexadecimal string, two characters per byte.
 * (Lower-case alphabet matches the existing behavior exercised by testEncodeHex.)
 */
public static String encodeHex(byte[] bytes) {
    final char[] digits = "0123456789abcdef".toCharArray();
    StringBuilder sb = new StringBuilder(bytes.length * 2);
    for (byte b : bytes) {
        int v = b & 0xFF;
        // High nibble first, then low nibble.
        sb.append(digits[v >>> 4]).append(digits[v & 0x0F]);
    }
    return sb.toString();
}
@Test public void testEncodeHex() { String input = ""; String output = ""; assertEquals(new String(StringUtils.encodeHex(input.getBytes(StandardCharsets.UTF_8))), output); input = "foo bar 123"; output = "666f6f2062617220313233"; assertEquals(new String(StringUtils.encodeHex(input.getBytes(StandardCharsets.UTF_8))), output); }
public DiscoveryDataChangedEvent(final String key, final String value, final Event event) { this.key = key; this.value = value; this.event = event; }
@Test public void testDiscoveryDataChangedEvent() { String key = "key"; String value = "value"; DiscoveryDataChangedEvent discoveryDataChangedEvent = new DiscoveryDataChangedEvent(key, value, event); Assertions.assertEquals(key, discoveryDataChangedEvent.getKey()); Assertions.assertEquals(value, discoveryDataChangedEvent.getValue()); Assertions.assertEquals(event, discoveryDataChangedEvent.getEvent()); }
@Override public int getMajorJavaVersion() { // Check properties for version if (project.getProperties().getProperty("maven.compiler.target") != null) { return getVersionFromString(project.getProperties().getProperty("maven.compiler.target")); } if (project.getProperties().getProperty("maven.compiler.release") != null) { return getVersionFromString(project.getProperties().getProperty("maven.compiler.release")); } // Check maven-compiler-plugin for version Plugin mavenCompilerPlugin = project.getPlugin("org.apache.maven.plugins:maven-compiler-plugin"); if (mavenCompilerPlugin != null) { Xpp3Dom pluginConfiguration = (Xpp3Dom) mavenCompilerPlugin.getConfiguration(); Optional<String> target = getChildValue(pluginConfiguration, "target"); if (target.isPresent()) { return getVersionFromString(target.get()); } Optional<String> release = getChildValue(pluginConfiguration, "release"); if (release.isPresent()) { return getVersionFromString(release.get()); } } return 6; // maven-compiler-plugin default is 1.6 }
@Test public void testGetMajorJavaVersion_undefinedDefaultsTo6() { assertThat(mavenProjectProperties.getMajorJavaVersion()).isEqualTo(6); }
public int size() { return producer.size(); }
@Test public void shouldNotOverrunBuffer() { for (int i = 0; i < REQUESTED_CAPACITY; i++) { assertTrue(producer.claim()); assertTrue(producer.commit()); } for (int i = REQUESTED_CAPACITY; i < MAXIMUM_CAPACITY; i++) { // Unknown what happens here. producer.claim(); producer.commit(); } assertFalse(producer.claim()); assertTrue(channel.size() >= REQUESTED_CAPACITY); assertTrue(channel.size() <= MAXIMUM_CAPACITY); }
@NonNull public String processShownotes() { String shownotes = rawShownotes; if (TextUtils.isEmpty(shownotes)) { Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message"); shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>"; } // replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) { shownotes = shownotes.replace("\n", "<br />"); } Document document = Jsoup.parse(shownotes); cleanCss(document); document.head().appendElement("style").attr("type", "text/css").text(webviewStyle); addTimecodes(document); return document.toString(); }
@Test public void testProcessShownotesAddTimecodeHmmssNoChapters() { final String timeStr = "2:11:12"; final long time = 2 * 60 * 60 * 1000 + 11 * 60 * 1000 + 12 * 1000; String shownotes = "<p> Some test text with a timecode " + timeStr + " here.</p>"; ShownotesCleaner t = new ShownotesCleaner(context, shownotes, Integer.MAX_VALUE); String res = t.processShownotes(); checkLinkCorrect(res, new long[]{time}, new String[]{timeStr}); }
public static URI parse(String featureIdentifier) { requireNonNull(featureIdentifier, "featureIdentifier may not be null"); if (featureIdentifier.isEmpty()) { throw new IllegalArgumentException("featureIdentifier may not be empty"); } // Legacy from the Cucumber Eclipse plugin // Older versions of Cucumber allowed it. if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) { return rootPackageUri(); } if (nonStandardPathSeparatorInUse(featureIdentifier)) { String standardized = replaceNonStandardPathSeparator(featureIdentifier); return parseAssumeFileScheme(standardized); } if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) { return parseAssumeFileScheme(featureIdentifier); } if (probablyURI(featureIdentifier)) { return parseProbableURI(featureIdentifier); } return parseAssumeFileScheme(featureIdentifier); }
@Test @DisabledOnOs(WINDOWS) void can_parse_absolute_file_form() { URI uri = FeaturePath.parse("file:/path/to/file.feature"); assertAll( () -> assertThat(uri.getScheme(), is("file")), () -> assertThat(uri.getSchemeSpecificPart(), is("/path/to/file.feature"))); }
public static FallbackMethod create(String fallbackMethodName, Method originalMethod, Object[] args, Object original, Object proxy) throws NoSuchMethodException { MethodMeta methodMeta = new MethodMeta( fallbackMethodName, originalMethod.getParameterTypes(), originalMethod.getReturnType(), original.getClass()); Map<Class<?>, Method> methods = FALLBACK_METHODS_CACHE .computeIfAbsent(methodMeta, FallbackMethod::extractMethods); if (!methods.isEmpty()) { return new FallbackMethod(methods, originalMethod.getReturnType(), args, original, proxy); } else { throw new NoSuchMethodException(String.format("%s %s.%s(%s,%s)", methodMeta.returnType, methodMeta.targetClass, methodMeta.fallbackMethodName, StringUtils.arrayToDelimitedString(methodMeta.params, ","), Throwable.class)); } }
@Test public void shouldFailIf2FallBackMethodsHandleSameException() throws Throwable { FallbackMethodTest target = new FallbackMethodTest(); Method testMethod = target.getClass().getMethod("testMethod", String.class); assertThatThrownBy(() -> FallbackMethod .create("returnMismatchFallback", testMethod, new Object[]{"test"}, target, target)) .isInstanceOf(NoSuchMethodException.class) .hasMessage( "class java.lang.String class io.github.resilience4j.fallback.FallbackMethodTest.returnMismatchFallback(class java.lang.String,class java.lang.Throwable)"); }
@GetMapping("/list") public AdminResult<List<OperationRecordLog>> list() { return ResultUtil.ok(recordLogService.list()); }
@Test public void testList() throws Exception { List<OperationRecordLog> operationRecordLogs = new ArrayList<>(); given(this.operationRecordLogService.list()).willReturn(operationRecordLogs); this.mockMvc.perform(MockMvcRequestBuilders.get("/operation-record/log/list")) .andExpect(status().isOk()) .andReturn(); }
@Override public Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties) throws IOException { return combineAndGetUpdateValue(currentValue, schema); }
@Test public void testDelete() { Schema avroSchema = new Schema.Parser().parse(AVRO_SCHEMA_STRING); GenericRecord deleteRecord = new GenericData.Record(avroSchema); Properties properties = new Properties(); deleteRecord.put("field1", 2); deleteRecord.put("Op", "D"); GenericRecord oldRecord = new GenericData.Record(avroSchema); oldRecord.put("field1", 2); oldRecord.put("Op", "U"); AWSDmsAvroPayload payload = new AWSDmsAvroPayload(Option.of(deleteRecord)); try { Option<IndexedRecord> outputPayload = payload.combineAndGetUpdateValue(oldRecord, avroSchema, properties); // expect nothing to be committed to table assertFalse(outputPayload.isPresent()); assertTrue(payload.isDeleted(avroSchema, properties)); } catch (Exception e) { fail("Unexpected exception"); } }
/**
 * Composes two functions, applying {@code f1} first and then {@code f2}.
 * Generalized from {@code Function<Integer, Integer>} to arbitrary types;
 * existing Integer-to-Integer call sites remain source-compatible.
 *
 * @param f1 function applied first
 * @param f2 function applied to f1's result
 * @return a function computing {@code f2(f1(x))}
 */
public static <T, R, V> Function<T, V> composeFunctions(Function<T, R> f1, Function<R, V> f2) {
    return f1.andThen(f2);
}
@Test public void testComposeToZero() { Function<Integer, Integer> multiply = x -> x * 10; Function<Integer, Integer> toZero = x -> 0; Function<Integer, Integer> composed = FunctionComposer.composeFunctions(multiply, toZero); assertEquals("Expected output of function composition leading to zero is 0", 0, (int) composed.apply(5)); }
/**
 * Compares the cached config's current md5 against each registered listener's
 * last-notified md5 and notifies only the listeners that are out of date.
 */
void checkListenerMd5() {
    for (ManagerListenerWrap wrap : listeners) {
        // Listener has not yet seen the current content version: notify it.
        if (!md5.equals(wrap.lastCallMd5)) {
            safeNotifyListener(dataId, group, content, type, md5, encryptedDataKey, wrap);
        }
    }
}
@Test void testCheckListenerMd5() throws NacosException { ConfigFilterChainManager filter = new ConfigFilterChainManager(new Properties()); final CacheData data = new CacheData(filter, "name1", "key", "group", "tenant"); final List<String> list = new ArrayList<>(); Listener listener = new Listener() { @Override public Executor getExecutor() { return Runnable::run; } @Override public void receiveConfigInfo(String configInfo) { list.add(configInfo); } }; data.addListener(listener); data.checkListenerMd5(); assertTrue(data.checkListenersMd5Consistent()); assertEquals(0, list.size()); data.setContent("new"); assertFalse(data.checkListenersMd5Consistent()); data.checkListenerMd5(); assertEquals(1, list.size()); assertEquals("new", list.get(0)); }
@Override public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) { return execute(commands, command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getNewName(), "New name must not be null!"); byte[] keyBuf = toByteArray(command.getKey()); byte[] newKeyBuf = toByteArray(command.getNewName()); if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) { return super.rename(commands); } return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf) .filter(Objects::nonNull) .zipWith( Mono.defer(() -> pTtl(command.getKey()) .filter(Objects::nonNull) .map(ttl -> Math.max(0, ttl)) .switchIfEmpty(Mono.just(0L)) ) ) .flatMap(valueAndTtl -> { return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1()); }) .thenReturn(new BooleanResponse<>(command, true)) .doOnSuccess((ignored) -> del(command.getKey())); }); }
@Test public void testRename() { connection.stringCommands().set(originalKey, value).block(); if (hasTtl) { connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block(); } Integer originalSlot = getSlotForKey(originalKey); newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot)); Boolean response = connection.keyCommands().rename(originalKey, newKey).block(); assertThat(response).isTrue(); final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block(); assertThat(newKeyValue).isEqualTo(value); if (hasTtl) { assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0); } else { assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1); } }
@Override public Message request(final Message msg, final long timeout) throws RequestTimeoutException, MQClientException, RemotingException, MQBrokerException, InterruptedException { msg.setTopic(withNamespace(msg.getTopic())); return this.defaultMQProducerImpl.request(msg, timeout); }
@Test public void testAsyncRequest_OnSuccess() throws Exception { when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(createTopicRoute()); final CountDownLatch countDownLatch = new CountDownLatch(1); RequestCallback requestCallback = new RequestCallback() { @Override public void onSuccess(Message message) { assertThat(message).isExactlyInstanceOf(MessageExt.class); assertThat(message.getTopic()).isEqualTo("FooBar"); assertThat(message.getBody()).isEqualTo(new byte[] {'a'}); assertThat(message.getFlag()).isEqualTo(1); countDownLatch.countDown(); } @Override public void onException(Throwable e) { } }; producer.request(message, requestCallback, 3 * 1000L); ConcurrentHashMap<String, RequestResponseFuture> responseMap = RequestFutureHolder.getInstance().getRequestFutureTable(); assertThat(responseMap).isNotNull(); MessageExt responseMsg = new MessageExt(); responseMsg.setTopic(message.getTopic()); responseMsg.setBody(message.getBody()); responseMsg.setFlag(1); for (Map.Entry<String, RequestResponseFuture> entry : responseMap.entrySet()) { RequestResponseFuture future = entry.getValue(); future.setSendRequestOk(true); future.getRequestCallback().onSuccess(responseMsg); } countDownLatch.await(defaultTimeout, TimeUnit.MILLISECONDS); }
public synchronized void reload(long checkpointId) { this.accCkp += 1; if (this.accCkp > 1) { // do not clean the new file assignment state for the first checkpoint, // this #reload calling is triggered by checkpoint success event, the coordinator // also relies on the checkpoint success event to commit the inflight instant, // and very possibly this component receives the notification before the coordinator, // if we do the cleaning, the records processed within the time range: // (start checkpoint, checkpoint success(and instant committed)) // would be assigned to a fresh new data bucket which is not the right behavior. this.newFileAssignStates.clear(); this.accCkp = 0; } this.smallFileAssignMap.clear(); this.writeProfile.reload(checkpointId); }
@Test public void testWriteProfileMetadataCache() throws Exception { WriteProfile writeProfile = new WriteProfile(writeConfig, context); assertTrue(writeProfile.getMetadataCache().isEmpty(), "Empty table should no have any instant metadata"); // write 3 instants of data for (int i = 0; i < 3; i++) { TestData.writeData(TestData.DATA_SET_INSERT, conf); } // the record profile triggers the metadata loading writeProfile.reload(1); assertThat("Metadata cache should have same number entries as timeline instants", writeProfile.getMetadataCache().size(), is(3)); writeProfile.getSmallFiles("par1"); assertThat("The metadata should be reused", writeProfile.getMetadataCache().size(), is(3)); }
@PostMapping("/api/v1/meetings/{uuid}/schedules") public void create( @PathVariable String uuid, @AuthAttendee long id, @RequestBody @Valid ScheduleCreateRequest request ) { scheduleService.create(uuid, id, request); }
@DisplayName("참가자가 스케줄을 생성하는데 성공하면 200 상태 코드를 응답한다.") @Test void create() { AttendeeLoginRequest loginRequest = new AttendeeLoginRequest(attendee.name(), attendee.password()); String token = RestAssured.given().log().all() .contentType(ContentType.JSON) .body(loginRequest) .when().post("/api/v1/meetings/{uuid}/login", meeting.getUuid()) .then().log().all() .statusCode(HttpStatus.OK.value()) .extract().cookie("ACCESS_TOKEN"); List<LocalTime> times = List.of(Timeslot.TIME_0100.startTime(), Timeslot.TIME_0130.startTime()); List<DateTimesCreateRequest> dateTimes = List.of( new DateTimesCreateRequest(today.getDate(), times), new DateTimesCreateRequest(tomorrow.getDate(), times) ); ScheduleCreateRequest scheduleCreateRequest = new ScheduleCreateRequest(dateTimes); RestAssured.given().log().all() .cookie("ACCESS_TOKEN", token) .pathParam("uuid", meeting.getUuid()) .contentType(ContentType.JSON) .body(scheduleCreateRequest) .when().post("/api/v1/meetings/{uuid}/schedules") .then().log().all() .statusCode(HttpStatus.OK.value()); }
@Override public void emitWatermark(Watermark watermark) { final long newWatermark = watermark.getTimestamp(); if (newWatermark <= maxWatermarkSoFar) { return; } maxWatermarkSoFar = newWatermark; watermarkEmitted.updateCurrentEffectiveWatermark(maxWatermarkSoFar); try { markActiveInternally(); output.emitWatermark( new org.apache.flink.streaming.api.watermark.Watermark(newWatermark)); } catch (ExceptionInChainedOperatorException e) { throw e; } catch (Exception e) { throw new ExceptionInChainedOperatorException(e); } }
@Test void testInitialZeroWatermark() { final CollectingDataOutput<Object> testingOutput = new CollectingDataOutput<>(); final WatermarkToDataOutput wmOutput = new WatermarkToDataOutput(testingOutput); wmOutput.emitWatermark(new org.apache.flink.api.common.eventtime.Watermark(0L)); assertThat(testingOutput.events).contains(new Watermark(0L)); }
DataTableType lookupTableTypeByType(Type type) { return lookupTableTypeByType(type, Function.identity()); }
@Test void null_big_integer_transformed_to_null() { DataTableTypeRegistry registry = new DataTableTypeRegistry(Locale.ENGLISH); DataTableType dataTableType = registry.lookupTableTypeByType(LIST_OF_LIST_OF_BIG_INTEGER); assertEquals( singletonList(singletonList(null)), dataTableType.transform(singletonList(singletonList(null)))); }
MessageFormatter formatter() { return formatter; }
@SuppressWarnings("deprecation") @Test public void testNewAndDeprecateGroupMetadataMessageFormatter() throws Exception { String[] deprecatedGroupMetadataMessageFormatter = generateArgsForFormatter("kafka.coordinator.group.GroupMetadataManager$GroupMetadataMessageFormatter"); assertInstanceOf(kafka.coordinator.group.GroupMetadataManager.GroupMetadataMessageFormatter.class, new ConsoleConsumerOptions(deprecatedGroupMetadataMessageFormatter).formatter()); String[] groupMetadataMessageFormatter = generateArgsForFormatter("org.apache.kafka.tools.consumer.GroupMetadataMessageFormatter"); assertInstanceOf(GroupMetadataMessageFormatter.class, new ConsoleConsumerOptions(groupMetadataMessageFormatter).formatter()); }
public List<QueuePath> getWildcardedQueuePaths(int maxAutoCreatedQueueDepth) { List<QueuePath> wildcardedPaths = new ArrayList<>(); // Start with the most explicit format (without wildcard) wildcardedPaths.add(this); String[] pathComponents = getPathComponents(); int supportedWildcardLevel = getSupportedWildcardLevel(maxAutoCreatedQueueDepth); // Collect all template entries for (int wildcardLevel = 1; wildcardLevel <= supportedWildcardLevel; wildcardLevel++) { int wildcardedComponentIndex = pathComponents.length - wildcardLevel; pathComponents[wildcardedComponentIndex] = WILDCARD_QUEUE; QueuePath wildcardedPath = createFromQueues(pathComponents); wildcardedPaths.add(wildcardedPath); } return wildcardedPaths; }
// Verifies the root path cannot be wildcarded: only the explicit root path itself is returned.
@Test public void testWildcardedQueuePathsWithRootPath() { int maxAutoCreatedQueueDepth = 1; List<QueuePath> expectedPaths = new ArrayList<>(); expectedPaths.add(ROOT_PATH); List<QueuePath> wildcardedPaths = ROOT_PATH.getWildcardedQueuePaths(maxAutoCreatedQueueDepth); Assert.assertEquals(expectedPaths, wildcardedPaths); }
/**
 * Picks the best response encoding for the given Accept header.
 * Convenience overload that delegates with no restricted mime-type list (null).
 */
public static String pickBestEncoding(String acceptHeader, Set<String> customMimeTypesSupported) { return pickBestEncoding(acceptHeader, null, customMimeTypesSupported); }
// Verifies that when a supported-types list is given, selection is restricted to it
// (JSON wins), and that an empty list falls back to the header's own best match (PSON).
@Test public void testPickBestEncodingWithSupportedMimeTypes() { Assert.assertEquals(RestUtils.pickBestEncoding(PSON_TYPE_HEADER_WITH_VALID_PARAMS_JSON, Arrays.asList(JSON_HEADER),Collections.emptySet()), JSON_HEADER); Assert.assertEquals(RestUtils.pickBestEncoding(PSON_TYPE_HEADER_WITH_VALID_PARAMS_JSON, Arrays.asList(), Collections.emptySet()), PSON_HEADER); }
/** Sets whether rolled-over log files are archived. Bound from configuration via Jackson. */
@JsonProperty public void setArchive(boolean archive) { this.archive = archive; }
// Verifies the factory's default async appender blocks when the queue is full
// (neverBlock == false) rather than silently dropping events.
@Test void defaultIsNotNeverBlock() { FileAppenderFactory<ILoggingEvent> fileAppenderFactory = new FileAppenderFactory<>(); fileAppenderFactory.setArchive(false); // default neverBlock assertThat(fileAppenderFactory.build(new LoggerContext(), "test", new DropwizardLayoutFactory(), new NullLevelFilterFactory<>(), new AsyncLoggingEventAppenderFactory())) .isInstanceOfSatisfying(AsyncAppender.class, asyncAppender -> assertThat(asyncAppender.isNeverBlock()).isFalse()); }
/** REST endpoint: returns application info as JSON or XML (UTF-8). */
@GET @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) public AppInfo get() { return getAppInfo(); }
// Verifies GET ws/v1/mapreduce/info defaults to a UTF-8 JSON response containing
// a single "info" element that matches the application context.
@Test public void testInfoDefault() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("mapreduce") .path("info/").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); JSONObject json = response.getEntity(JSONObject.class); assertEquals("incorrect number of elements", 1, json.length()); verifyAMInfo(json.getJSONObject("info"), appContext); }
/**
 * Token-bucket style rate check: refills {@code token} to {@code rate} once per
 * {@code interval} ms, then consumes one token per call. Returns true while tokens remain.
 * NOTE(review): the refill is a check-then-act on two atomics, so concurrent callers
 * crossing the interval boundary may refill twice or interleave — confirm whether
 * approximate limiting is acceptable here.
 */
public boolean isAllowable() { long now = System.currentTimeMillis(); if (now > lastResetTime.get() + interval) { token.set(rate); lastResetTime.set(now); } return token.decrementAndGet() >= 0; }
// Verifies a call is allowed, that the bucket refills after the 1000ms interval
// (reset time changes), and that the refilled bucket has rate-1 == 4 tokens left.
@Test void testIsAllowable() throws Exception { statItem = new StatItem("test", 5, 1000L); long lastResetTime = statItem.getLastResetTime(); assertTrue(statItem.isAllowable()); Thread.sleep(1100L); assertTrue(statItem.isAllowable()); assertTrue(lastResetTime != statItem.getLastResetTime()); assertEquals(4, statItem.getToken()); }
/**
 * Synchronous produce path: optionally opens a Kafka transaction when a transactionId
 * is configured, then either fans out an iterable body as individual records
 * (when useIterator is enabled) or sends the body as a single record.
 */
@Override // Camel calls this method if the endpoint isSynchronous(), as the // KafkaEndpoint creates a SynchronousDelegateProducer for it public void process(Exchange exchange) throws Exception { // is the message body a list or something that contains multiple values Message message = exchange.getIn(); if (transactionId != null) { startKafkaTransaction(exchange); } if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) { processIterableSync(exchange, message); } else { processSingleMessageSync(exchange, message); } }
// Verifies the endpoint-configured topic ("someTopic") wins over the KafkaConstants.TOPIC
// header, while key and partition headers are honored in the produced record.
@Test public void processSendMessageWithTopicHeader() throws Exception { endpoint.getConfiguration().setTopic("someTopic"); Mockito.when(exchange.getIn()).thenReturn(in); Mockito.when(exchange.getMessage()).thenReturn(in); in.setHeader(KafkaConstants.TOPIC, "anotherTopic"); in.setHeader(KafkaConstants.KEY, "someKey"); in.setHeader(KafkaConstants.PARTITION_KEY, 4); producer.process(exchange); verifySendMessage(4, "someTopic", "someKey"); assertRecordMetadataExists(); }
/**
 * Sets the level of the given namespace and all of its child loggers.
 *
 * @param namespace the logging namespace to adjust; may not be null
 * @param level the level to apply; may not be null
 * @return the sorted names of every logger whose level was changed
 */
public synchronized List<String> setLevel(String namespace, Level level) {
    Objects.requireNonNull(namespace, "Logging namespace may not be null");
    Objects.requireNonNull(level, "Level may not be null");
    log.info("Setting level of namespace {} and children to {}", namespace, level);
    List<String> affectedLoggerNames = new ArrayList<>();
    for (org.apache.log4j.Logger affected : loggers(namespace)) {
        setLevel(affected, level);
        affectedLoggerNames.add(affected.getName());
    }
    Collections.sort(affectedLoggerNames);
    return affectedLoggerNames;
}
// Verifies null namespace and null level are both rejected with NullPointerException.
@Test public void testSetLevelNullArguments() { Logger root = logger("root"); Loggers loggers = new TestLoggers(root); assertThrows(NullPointerException.class, () -> loggers.setLevel(null, Level.INFO)); assertThrows(NullPointerException.class, () -> loggers.setLevel("root", null)); }
/**
 * Unregisters every cached Nacos config listener and empties the watch cache.
 * Fix: the original removed entries from {@code watchCache} inside its own
 * {@code forEach}, which throws ConcurrentModificationException for
 * non-concurrent map implementations; iterate first, then clear in one step.
 */
@Override
public void close() {
    watchCache.forEach((dataId, lss) -> {
        configService.removeListener(dataId, NacosPathConstants.GROUP, lss);
        LOG.info("nacos sync remove listener key:{}", dataId);
    });
    // Net effect is identical to per-entry removal: every entry is gone.
    watchCache.clear();
}
// Smoke test: close() must complete without throwing (listener removal + cache cleanup).
@Test public void testClose() { nacosSyncDataService.close(); }
/**
 * Resolves this worker's identity, in priority order:
 * 1) an explicitly configured UUID property;
 * 2) the first non-comment, non-blank line of the identity file (extra lines warn);
 * 3) a freshly generated UUID, which is then best-effort persisted to the identity
 *    file (CREATE_NEW, so an existing file is never overwritten) and the file is
 *    made read-only. Persistence failures only warn — the worker still starts,
 *    but loses its identity across restarts.
 * A missing file falls through to generation; any other read error aborts startup.
 */
@Override public WorkerIdentity get() { // Look at configurations first if (mConf.isSetByUser(PropertyKey.WORKER_IDENTITY_UUID)) { String uuidStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID); final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr); LOG.debug("Loaded worker identity from configuration: {}", workerIdentity); return workerIdentity; } // Try loading from the identity file String filePathStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH); final Path idFile = Paths.get(filePathStr); try (BufferedReader reader = Files.newBufferedReader(idFile)) { List<String> nonCommentLines = reader.lines() .filter(line -> !line.startsWith("#")) .filter(line -> !line.trim().isEmpty()) .collect(Collectors.toList()); if (nonCommentLines.size() > 0) { if (nonCommentLines.size() > 1) { LOG.warn("Multiple worker identities configured in {}, only the first one will be used", idFile); } String uuidStr = nonCommentLines.get(0); final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr); LOG.debug("Loaded worker identity from file {}: {}", idFile, workerIdentity); return workerIdentity; } } catch (FileNotFoundException | NoSuchFileException ignored) { // if not existent, proceed to auto generate one LOG.debug("Worker identity file {} not found", idFile); } catch (IOException e) { // in case of other IO error, better stop worker from starting up than use a new identity throw new RuntimeException( String.format("Failed to read worker identity from identity file %s", idFile), e); } // No identity is supplied by the user // Assume this is the first time the worker starts up, and generate a new one LOG.debug("Auto generating new worker identity as no identity is supplied by the user"); UUID generatedId = mUUIDGenerator.get(); WorkerIdentity identity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(generatedId); LOG.debug("Generated worker identity as {}", identity); try (BufferedWriter writer = 
Files.newBufferedWriter(idFile, StandardCharsets.UTF_8, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { writer.write("# Worker identity automatically generated at "); writer.write(OffsetDateTime.now().format(DateTimeFormatter.RFC_1123_DATE_TIME)); writer.newLine(); writer.write(generatedId.toString()); writer.newLine(); } catch (Exception e) { LOG.warn("Failed to persist automatically generated worker identity ({}) to {}, " + "this worker will lose its identity after restart", identity, idFile, e); } try { // set the file to be read-only Set<PosixFilePermission> permSet = Files.getPosixFilePermissions(idFile); Set<PosixFilePermission> nonWritablePermSet = Sets.filter(permSet, perm -> perm != PosixFilePermission.OWNER_WRITE && perm != PosixFilePermission.GROUP_WRITE && perm != PosixFilePermission.OTHERS_WRITE); Files.setPosixFilePermissions(idFile, nonWritablePermSet); } catch (Exception e) { LOG.warn("Failed to set identity file to be read-only", e); } return identity; }
// Verifies an explicitly configured UUID property takes precedence over a
// conflicting UUID stored in the identity file.
@Test public void preferExplicitConfigurationFirst() throws Exception { AlluxioProperties props = new AlluxioProperties(); props.put(PropertyKey.WORKER_IDENTITY_UUID, mReferenceUuid.toString(), Source.RUNTIME); props.set(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH, mUuidFilePath.toString()); try (BufferedWriter fout = Files.newBufferedWriter(mUuidFilePath, StandardCharsets.UTF_8, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { fout.write(mDifferentUuid.toString()); fout.newLine(); } AlluxioConfiguration conf = new InstancedConfiguration(props); WorkerIdentityProvider provider = new WorkerIdentityProvider(conf); WorkerIdentity identity = provider.get(); assertEquals(mReferenceUuid, WorkerIdentity.ParserV1.INSTANCE.toUUID(identity)); assertNotEquals(mDifferentUuid, WorkerIdentity.ParserV1.INSTANCE.toUUID(identity)); }
/**
 * Creates a FileRecords batch over the given record iterator for one split,
 * finishing no splits.
 */
public static <T> FileRecords<T> forRecords(
        final String splitId, final BulkFormat.RecordIterator<T> recordsForSplit) {
    final Set<String> noFinishedSplits = Collections.emptySet();
    return new FileRecords<>(splitId, recordsForSplit, noFinishedSplits);
}
// Verifies that reading a record before moving to a split is an IllegalStateException.
@Test void testRecordsInitiallyIllegal() { final FileRecords<Object> records = FileRecords.forRecords("splitId", new SingletonResultIterator<>()); assertThatThrownBy(records::nextRecordFromSplit).isInstanceOf(IllegalStateException.class); }
/** The operator is done once draining was requested and no output page is pending. */
@Override
public boolean isFinished() {
    if (!finishing) {
        return false;
    }
    return outputPage == null;
}
// Builds a VARCHAR set via SetBuilderOperator, probes a 10-row sequence starting at "30",
// and verifies the semi-join match column (true for values present in the build side),
// with and without precomputed hash channels.
@Test(dataProvider = "hashEnabledValues") public void testSemiJoinOnVarcharType(boolean hashEnabled) { DriverContext driverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext(); // build OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), VARCHAR); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .row("10") .row("30") .row("30") .row("35") .row("36") .row("37") .row("50") .build()); SetBuilderOperatorFactory setBuilderOperatorFactory = new SetBuilderOperatorFactory( 1, new PlanNodeId("test"), rowPagesBuilder.getTypes().get(0), 0, rowPagesBuilder.getHashChannel(), 10, new JoinCompiler(createTestMetadataManager())); Operator setBuilderOperator = setBuilderOperatorFactory.createOperator(driverContext); Driver driver = Driver.createDriver(driverContext, buildOperator, setBuilderOperator); while (!driver.isFinished()) { driver.process(); } // probe List<Type> probeTypes = ImmutableList.of(VARCHAR, BIGINT); RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(hashEnabled, Ints.asList(0), VARCHAR, BIGINT); List<Page> probeInput = rowPagesBuilderProbe .addSequencePage(10, 30, 0) .build(); Optional<Integer> probeHashChannel = hashEnabled ? 
Optional.of(probeTypes.size()) : Optional.empty(); OperatorFactory joinOperatorFactory = new HashSemiJoinOperatorFactory( 2, new PlanNodeId("test"), setBuilderOperatorFactory.getSetProvider(), rowPagesBuilderProbe.getTypes(), 0, probeHashChannel); //probeHashChannel); // expected MaterializedResult expected = resultBuilder(driverContext.getSession(), concat(probeTypes, ImmutableList.of(BOOLEAN))) .row("30", 0L, true) .row("31", 1L, false) .row("32", 2L, false) .row("33", 3L, false) .row("34", 4L, false) .row("35", 5L, true) .row("36", 6L, true) .row("37", 7L, true) .row("38", 8L, false) .row("39", 9L, false) .build(); OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, hashEnabled, ImmutableList.of(probeTypes.size())); }
/**
 * Renders a single 4-byte CRC as a hex string of the form {@code 0x%08x}.
 *
 * @param bytes exactly 4 bytes holding one 32-bit CRC value
 * @throws IllegalArgumentException if the array is not exactly 4 bytes long
 */
public static String toSingleCrcString(final byte[] bytes) {
    if (bytes.length != 4) {
        String message = String.format(
            "Unexpected byte[] length '%d' for single CRC. Contents: %s",
            bytes.length, Arrays.toString(bytes));
        throw new IllegalArgumentException(message);
    }
    return String.format("0x%08x", readInt(bytes, 0));
}
// Verifies a 4-byte CRC renders as lowercase zero-padded hex with an 0x prefix.
@Test public void testToSingleCrcString() { byte[] buf = CrcUtil.intToBytes(0xcafebeef); assertEquals( "0xcafebeef", CrcUtil.toSingleCrcString(buf)); }
/**
 * Unzips the plugin's JAR and builds the exploded representation from the
 * extracted directory. Any failure (including failure to resolve the JAR inside
 * the try) is wrapped in an IllegalStateException naming the plugin and its path.
 */
@Override public ExplodedPlugin explode(PluginInfo info) { try { File dir = unzipFile(info.getNonNullJarFile()); return explodeFromUnzippedDir(info, info.getNonNullJarFile(), dir); } catch (Exception e) { throw new IllegalStateException(String.format("Fail to open plugin [%s]: %s", info.getKey(), info.getNonNullJarFile().getAbsolutePath()), e); } }
// Verifies exploding a real plugin JAR extracts key, main entry and bundled libs,
// and leaves both the original JAR and the unzipped lib files on disk.
@Test public void copy_and_extract_libs() throws IOException { File jar = loadFile("sonar-checkstyle-plugin-2.8.jar"); ExplodedPlugin exploded = underTest.explode(PluginInfo.create(jar)); assertThat(exploded.getKey()).isEqualTo("checkstyle"); assertThat(exploded.getMain()).isFile().exists(); assertThat(exploded.getLibs()).extracting(File::getName).containsExactlyInAnyOrder("antlr-2.7.6.jar", "checkstyle-5.1.jar", "commons-cli-1.0.jar"); assertThat(new File(jar.getParent(), "sonar-checkstyle-plugin-2.8.jar")).exists(); assertThat(new File(jar.getParent(), "sonar-checkstyle-plugin-2.8.jar_unzip/META-INF/lib/checkstyle-5.1.jar")).exists(); }
/** Materializes this scan as a Spark Batch over the planned task groups. */
@Override public Batch toBatch() { return new SparkBatch( sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode()); }
// Verifies OR-combined system-function predicates (years(ts) = ..., bucket(id, 5) >= 2)
// prune input partitions (6 remain), and the negated predicate prunes complementarily (4).
@TestTemplate public void testPartitionedOr() throws Exception { createPartitionedTable(spark, tableName, "years(ts), bucket(5, id)"); SparkScanBuilder builder = scanBuilder(); YearsFunction.TimestampToYearsFunction tsToYears = new YearsFunction.TimestampToYearsFunction(); UserDefinedScalarFunc udf1 = toUDF(tsToYears, expressions(fieldRef("ts"))); Predicate predicate1 = new Predicate("=", expressions(udf1, intLit(2018 - 1970))); BucketFunction.BucketLong bucketLong = new BucketFunction.BucketLong(DataTypes.LongType); UserDefinedScalarFunc udf = toUDF(bucketLong, expressions(intLit(5), fieldRef("id"))); Predicate predicate2 = new Predicate(">=", expressions(udf, intLit(2))); Predicate predicate = new Or(predicate1, predicate2); pushFilters(builder, predicate); Batch scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(6); // NOT (years(ts) = 48 OR bucket(id, 5) >= 2) builder = scanBuilder(); predicate = new Not(predicate); pushFilters(builder, predicate); scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(4); }
/**
 * Unlinks the given node from the list in O(1) and decrements the size.
 * NOTE(review): in this list's convention, {@code tail} appears to be the entry end
 * reached via {@code next} links and {@code head} the opposite end via {@code prev} —
 * confirm against the add()/head()/tail() implementations before relying on it.
 * The node is assumed to belong to this list; passing a foreign node corrupts links.
 */
public void remove(ListNode2<T> node) { if (node == tail) { tail = node.next; } else { node.prev.next = node.next; } if (node == head) { head = node.prev; } else { node.next.prev = node.prev; } size--; }
// Exercises removal at every position: sole element, first, last, and middle,
// checking size, first/last, head/tail pointers and full array contents each time.
@Test public void testRemove() { DoublyLinkedList<Integer> list = new DoublyLinkedList<Integer>(); ListNode2<Integer> node1 = list.add(1); list.remove(node1); node1 = list.add(1); ListNode2<Integer> node2 = list.add(2); list.remove(node1); assertEquals(1, list.size()); assertEquals(new Integer(2), list.first()); assertEquals(node2, list.head()); assertArrayEquals(new Integer[]{2}, list.toArray()); list.remove(node2); assertIsEmpty(list); node1 = list.add(1); node2 = list.add(2); list.remove(node2); assertEquals(1, list.size()); assertEquals(new Integer(1), list.first()); assertEquals(node1, list.head()); assertArrayEquals(new Integer[]{1}, list.toArray()); node2 = list.add(2); list.add(3); assertEquals(3, list.size()); assertArrayEquals(new Integer[]{1, 2, 3}, list.toArray()); list.remove(node2); assertEquals(2, list.size()); assertEquals(node1, list.tail()); assertEquals(new Integer(3), list.last()); assertArrayEquals(new Integer[]{1, 3}, list.toArray()); }
/**
 * Moves aside the current edits storage in preparation for an upgrade,
 * delegating to NNUpgradeUtil. Failures are logged with the directory
 * context and rethrown so the upgrade aborts.
 */
@Override public void doPreUpgrade() throws IOException { LOG.info("Starting upgrade of edits directory " + sd.getRoot()); try { NNUpgradeUtil.doPreUpgrade(conf, sd); } catch (IOException ioe) { LOG.error("Failed to move aside pre-upgrade storage " + "in image directory " + sd.getRoot(), ioe); throw ioe; } }
// Verifies doPreUpgrade surfaces an IOException when the storage directory is
// read-only (rename of current to previous.tmp fails); permissions are restored
// in finally so the temp directory can be deleted.
@Test public void testDoPreUpgradeIOError() throws IOException { File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror"); List<URI> editUris = Collections.singletonList(storageDir.toURI()); NNStorage storage = setupEdits(editUris, 5); StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next(); assertNotNull(sd); // Change storage directory so that renaming current to previous.tmp fails. FileUtil.setWritable(storageDir, false); FileJournalManager jm = null; try { jm = new FileJournalManager(conf, sd, storage); exception.expect(IOException.class); if (NativeCodeLoader.isNativeCodeLoaded()) { exception.expectMessage("failure in native rename"); } jm.doPreUpgrade(); } finally { IOUtils.cleanupWithLogger(LOG, jm); // Restore permissions on storage directory and make sure we can delete. FileUtil.setWritable(storageDir, true); FileUtil.fullyDelete(storageDir); } }
/** Sets a message header, silently skipping messages that carry no properties. */
static void setHeader(Message message, String name, String value) {
    MessageProperties properties = message.getMessageProperties();
    if (properties != null) {
        properties.setHeader(name, value);
    }
}
// Verifies a header set through MessageHeaders is readable back from the message properties.
@Test void setHeader() { MessageHeaders.setHeader(message, "b3", "1"); assertThat((String) message.getMessageProperties().getHeader("b3")) .isEqualTo("1"); }
/**
 * Applies sampler configuration: base config, then sampling interval,
 * Prometheus adapter connection, and the metric-to-query map, in that order.
 */
@Override public void configure(Map<String, ?> configs) { super.configure(configs); configureSamplingInterval(configs); configurePrometheusAdapter(configs); configureQueryMap(configs); }
// Verifies configure() with only the endpoint set succeeds and installs the
// default 2m-irate CPU utilization query for BROKER_CPU_UTIL.
@Test public void testConfigureWithPrometheusScrapingDefaultIntervalDoesNotFail() throws Exception { Map<String, Object> config = new HashMap<>(); config.put(PROMETHEUS_SERVER_ENDPOINT_CONFIG, "kafka-cluster-1.org:9090"); addCapacityConfig(config); _prometheusMetricSampler.configure(config); String expectedQuery = "1 - avg by (instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[2m]))"; assertEquals(expectedQuery, _prometheusMetricSampler._metricToPrometheusQueryMap.get(RawMetricType.BROKER_CPU_UTIL)); }
/**
 * Wraps the span-finishing behavior in a Callback: with a delegate, the delegate
 * runs in the span's scope before the span finishes; without one, the span is
 * simply finished on completion.
 */
static Callback create(@Nullable Callback delegate, Span span, CurrentTraceContext current) {
    return delegate != null
        ? new DelegateAndFinishSpan(delegate, span, current)
        : new FinishSpan(span);
}
// Verifies the delegate callback observes the span's context as current during
// onCompletion, and that the span is finished afterwards.
@Test void on_completion_should_have_span_in_scope() { Span span = tracing.tracer().nextSpan().start(); Callback delegate = (metadata, exception) -> assertThat(currentTraceContext.get()).isSameAs(span.context()); TracingCallback.create(delegate, span, currentTraceContext) .onCompletion(createRecordMetadata(), null); assertThat(spans.get(0).finishTimestamp()).isNotZero(); }
/**
 * Validates the JWT's "exp" claim against the given check time, tolerating
 * {@code allowableClockSkewMs} of clock skew (the claim is in seconds, hence
 * the factor of 1000). A missing claim yields a "does not exist" result;
 * an inherently invalid token returns its failure reason; expiry on or before
 * (checkTime - skew) fails, otherwise success.
 */
public static OAuthBearerValidationResult validateExpirationTime(OAuthBearerUnsecuredJws jwt, long whenCheckTimeMs, int allowableClockSkewMs) throws OAuthBearerConfigException { Number value; try { value = Objects.requireNonNull(jwt).expirationTime(); } catch (OAuthBearerIllegalTokenException e) { return e.reason(); } boolean exists = value != null; if (!exists) return doesNotExistResult(true, "exp"); double doubleValue = value.doubleValue(); return whenCheckTimeMs - confirmNonNegative(allowableClockSkewMs) >= 1000 * doubleValue ? OAuthBearerValidationResult.newFailure(String.format( "The indicated time (%d ms) minus allowable clock skew (%d ms) was on or after the Expiration Time value (%f seconds)", whenCheckTimeMs, allowableClockSkewMs, doubleValue)) : OAuthBearerValidationResult.newSuccess(); }
// Sweeps clock-skew and check-time offsets around "now" and verifies the
// success/failure boundary matches (checkTime - skew) >= exp * 1000 exactly.
@Test public void validateExpirationTime() { long nowMs = TIME.milliseconds(); double nowClaimValue = ((double) nowMs) / 1000; StringBuilder sb = new StringBuilder("{"); appendJsonText(sb, "exp", nowClaimValue); appendCommaJsonText(sb, "sub", "principalName"); sb.append("}"); String compactSerialization = HEADER_COMPACT_SERIALIZATION + Base64.getUrlEncoder().withoutPadding().encodeToString(sb.toString().getBytes(StandardCharsets.UTF_8)) + "."; OAuthBearerUnsecuredJws testJwt = new OAuthBearerUnsecuredJws(compactSerialization, "sub", "scope"); for (int allowableClockSkewMs : new int[] {0, 5, 10, 20}) { for (long whenCheckOffsetMs : new long[] {-10, 0, 10}) { long whenCheckMs = nowMs + whenCheckOffsetMs; OAuthBearerValidationResult result = OAuthBearerValidationUtils.validateExpirationTime(testJwt, whenCheckMs, allowableClockSkewMs); if (whenCheckMs - allowableClockSkewMs >= nowClaimValue * 1000) // expired assertTrue(isFailureWithMessageAndNoFailureScope(result), assertionFailureMessage(nowClaimValue, allowableClockSkewMs, whenCheckMs)); else assertTrue(isSuccess(result), assertionFailureMessage(nowClaimValue, allowableClockSkewMs, whenCheckMs)); } } }
/** Resolves the on-disk file for a layer: its digest directory plus the diff-ID filename. */
Path getLayerFile(DescriptorDigest layerDigest, DescriptorDigest layerDiffId) {
    Path layerDirectory = getLayerDirectory(layerDigest);
    return layerDirectory.resolve(getLayerFilename(layerDiffId));
}
// Verifies the layer file path is <cache>/layers/<layer digest>/<diff id>.
@Test public void testGetLayerFile() throws DigestException { DescriptorDigest layerDigest = DescriptorDigest.fromHash( "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); DescriptorDigest diffId = DescriptorDigest.fromHash( "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); Assert.assertEquals( Paths.get( "cache", "directory", "layers", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), TEST_CACHE_STORAGE_FILES.getLayerFile(layerDigest, diffId)); }
/** Asserts the subject map is empty, by delegating with an empty expected map. */
@CanIgnoreReturnValue public final Ordered containsExactly() { return containsExactlyEntriesIn(ImmutableMap.of()); }
// Verifies the failure message disambiguates keys whose toString collides (1 vs 1L)
// by appending the key's class name, and reports both mismatched values.
@Test public void containsExactlyWrongValue_sameToStringForKeys() { expectFailureWhenTestingThat(ImmutableMap.of(1L, "jan", 1, "feb")) .containsExactly(1, "jan", 1L, "feb"); assertFailureKeys( "keys with wrong values", "for key", "expected value", "but got value", "for key", "expected value", "but got value", "---", "expected", "but was"); assertFailureValueIndexed("for key", 0, "1 (java.lang.Integer)"); assertFailureValueIndexed("expected value", 0, "jan"); assertFailureValueIndexed("but got value", 0, "feb"); assertFailureValueIndexed("for key", 1, "1 (java.lang.Long)"); assertFailureValueIndexed("expected value", 1, "feb"); assertFailureValueIndexed("but got value", 1, "jan"); }
/**
 * Registers a configuration key, rejecting duplicate names and recording the
 * key's group (in first-seen order) the first time it appears.
 *
 * @return this ConfigDef, for chaining
 * @throws ConfigException if a key with the same name was already defined
 */
public ConfigDef define(ConfigKey key) {
    if (configKeys.containsKey(key.name)) {
        throw new ConfigException("Configuration " + key.name + " is defined twice.");
    }
    String group = key.group;
    if (group != null && !groups.contains(group)) {
        groups.add(group);
    }
    configKeys.put(key.name, key);
    return this;
}
// Verifies defining a key whose default value fails its own validator throws ConfigException.
@Test public void testInvalidDefaultString() { assertThrows(ConfigException.class, () -> new ConfigDef().define("name", Type.STRING, "bad", ValidString.in("valid", "values"), Importance.HIGH, "docs")); }
/**
 * Rebalances instances across workers by repeatedly moving one instance from
 * the busiest worker to the idlest, until the busiest and idlest differ by at
 * most one instance. Returns only the assignments that were moved.
 * Fix: replaces the raw, unchecked {@code (Queue)} casts over the map's
 * {@code List} values with direct {@code List} operations — {@code remove(0)}
 * is equivalent to {@code LinkedList.poll()} here, since the loop condition
 * guarantees the source list is non-empty.
 *
 * @param currentAssignments the existing instance-to-worker assignments
 * @param workers all known worker ids, including idle ones
 * @return the new assignments created by the rebalance (moved instances only)
 */
@Override
public List<Assignment> rebalance(List<Assignment> currentAssignments, Set<String> workers) {
    // Index worker -> assigned instances, seeding every known worker so idle
    // workers are candidates for receiving moved instances.
    Map<String, List<Instance>> workerToAssignmentMap = new HashMap<>();
    workers.forEach(workerId -> workerToAssignmentMap.put(workerId, new LinkedList<>()));
    currentAssignments.forEach(
            assignment -> workerToAssignmentMap.computeIfAbsent(assignment.getWorkerId(), s -> new LinkedList<>())
                    .add(assignment.getInstance()));

    List<Assignment> newAssignments = new LinkedList<>();
    int iterations = 0;
    while (true) {
        iterations++;
        Map.Entry<String, List<Instance>> mostAssignmentsWorker =
                findWorkerWithMostAssignments(workerToAssignmentMap);
        Map.Entry<String, List<Instance>> leastAssignmentsWorker =
                findWorkerWithLeastAssignments(workerToAssignmentMap);
        int most = mostAssignmentsWorker.getValue().size();
        int least = leastAssignmentsWorker.getValue().size();
        // Balanced once the busiest and idlest workers differ by at most one.
        if (most == least || most == least + 1) {
            break;
        }
        String mostAssignmentsWorkerId = mostAssignmentsWorker.getKey();
        String leastAssignmentsWorkerId = leastAssignmentsWorker.getKey();
        List<Instance> src = workerToAssignmentMap.get(mostAssignmentsWorkerId);
        List<Instance> dest = workerToAssignmentMap.get(leastAssignmentsWorkerId);
        // Move one instance from the busiest worker to the idlest worker.
        Instance instance = src.remove(0);
        Assignment newAssignment = Assignment.newBuilder()
                .setInstance(instance)
                .setWorkerId(leastAssignmentsWorkerId)
                .build();
        newAssignments.add(newAssignment);
        dest.add(instance);
    }
    log.info("Rebalance - iterations: {}", iterations);
    return newAssignments;
}
// Verifies rebalancing 10 instances from one worker across 3 workers moves 3 instances
// to each of the 2 previously idle workers (original worker keeps 4; 4/3/3 is balanced).
@Test public void testRebalance() { Function.FunctionMetaData function1 = Function.FunctionMetaData.newBuilder() .setFunctionDetails(Function.FunctionDetails.newBuilder().setName("func-1") .setNamespace("namespace-1").setTenant("tenant-1").setParallelism(1)).setVersion(0) .build(); List<Function.Assignment> assignments = new LinkedList<>(); for (int i = 0; i < 10; i++) { Function.Assignment assignment1 = Function.Assignment.newBuilder() .setWorkerId("worker-1") .setInstance(Function.Instance.newBuilder() .setFunctionMetaData(function1).setInstanceId(i).build()) .build(); assignments.add(assignment1); } Set<String> workers = new HashSet<>(); for (int i = 0; i < 3; i++) { workers.add("worker-" + i); } RoundRobinScheduler roundRobinScheduler = new RoundRobinScheduler(); List<Function.Assignment> newAssignments = roundRobinScheduler.rebalance(assignments, workers); Map<String, Integer> workerAssignments = new HashMap<>(); for (Function.Assignment assignment : newAssignments) { Integer count = workerAssignments.get((assignment.getWorkerId())); if (count == null) { count = 0; } count++; workerAssignments.put(assignment.getWorkerId(), count); } Assert.assertEquals(workerAssignments.size(), 2); for (Map.Entry<String, Integer> entry : workerAssignments.entrySet()) { Assert.assertEquals(entry.getValue().intValue(), 3); } }
/**
 * Opens a WebDAV upload stream with two expect-continue-specific retries:
 * a 412 Precondition Failed is retried once with the (presumed expired) lock
 * token dropped, and a 417 Expectation Failed is retried once with the
 * Expect: 100-continue header removed (which also drops the lock token).
 * Any failure on the retry, or without expect-continue, propagates as-is.
 */
@Override public HttpResponseOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { return this.write(file, this.toHeaders(file, status, expect), status); } catch(ConflictException e) { if(expect) { if(null != status.getLockId()) { // Handle 412 Precondition Failed with expired token log.warn(String.format("Retry failure %s with lock id %s removed", e, status.getLockId())); return this.write(file, this.toHeaders(file, status.withLockId(null), expect), status); } } throw e; } catch(InteroperabilityException e) { if(expect) { // Handle 417 Expectation Failed log.warn(String.format("Retry failure %s with Expect: Continue removed", e)); return this.write(file, this.toHeaders(file, status.withLockId(null), false), status); } throw e; } }
// Uploads a file into a fresh folder, then overwrites it with different content and
// verifies: the folder ETag changes on create but not on overwrite, the file's ETag
// and size change on overwrite, and cleanup deletes both file and folder.
@Test public void testReplaceContent() throws Exception { final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random()); final Path folder = new DAVDirectoryFeature(session).mkdir(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path test = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final HttpUploadFeature upload = new DAVUploadFeature(session); { final String folderEtag = new DAVAttributesFinderFeature(session).find(folder).getETag(); final byte[] content = RandomUtils.nextBytes(100); final OutputStream out = local.getOutputStream(false); IOUtils.write(content, out); out.close(); final TransferStatus status = new TransferStatus(); status.setLength(content.length); upload.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), status, new DisabledConnectionCallback()); assertNotEquals(folderEtag, new DAVAttributesFinderFeature(session).find(folder).getETag()); } final PathAttributes attr1 = new DAVAttributesFinderFeature(session).find(test); { final String folderEtag = new DAVAttributesFinderFeature(session).find(folder).getETag(); final String fileEtag = new DAVAttributesFinderFeature(session).find(test).getETag(); final byte[] content = RandomUtils.nextBytes(101); final OutputStream out = local.getOutputStream(false); IOUtils.write(content, out); out.close(); final TransferStatus status = new TransferStatus(); status.setLength(content.length); upload.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), status, new DisabledConnectionCallback()); assertEquals(folderEtag, new DAVAttributesFinderFeature(session).find(folder).getETag()); assertNotEquals(fileEtag, new DAVAttributesFinderFeature(session).find(test).getETag()); } final PathAttributes attr2 = 
new DAVAttributesFinderFeature(session).find(test); assertEquals(101L, attr2.getSize()); assertNotEquals(attr1.getETag(), attr2.getETag()); new DAVDeleteFeature(session).delete(Arrays.asList(test, folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Adjusts the raw invocation parameters to fit the target method: prepends the
 * evaluation context when the method requires it, resolves named parameters to
 * positional ones (returning null when the method is incompatible), expands or
 * collapses varargs for positional calls, and finally applies type coercion.
 * NOTE(review): as the inline comment says, named-parameter handling assumes that
 * if the first parameter is a NamedParameter then all of them are.
 */
static Object[] getAdjustedParametersForMethod(EvaluationContext ctx, Object[] params, boolean isNamedParams, Method m) { logger.trace("getAdjustedParametersForMethod {} {} {} {}", ctx, params, isNamedParams, m); Object[] toReturn = addCtxParamIfRequired(ctx, params, isNamedParams, m); Class<?>[] parameterTypes = m.getParameterTypes(); if (isNamedParams) { // This is inherently frail because it expects that, if, the first parameter is NamedParameter and the // function is a CustomFunction, then all parameters are NamedParameter NamedParameter[] namedParams = Arrays.stream(toReturn).map(NamedParameter.class::cast).toArray(NamedParameter[]::new); toReturn = BaseFEELFunctionHelper.calculateActualParams(m, namedParams); if (toReturn == null) { // incompatible method return null; } } else if (toReturn.length > 0) { // if named parameters, then it has been adjusted already in the calculateActualParams method, // otherwise adjust here toReturn = adjustForVariableParameters(toReturn, parameterTypes); } toReturn = adjustByCoercion(parameterTypes, toReturn); return toReturn; }
// Verifies a NamedParameter("list", ...) is unwrapped to its positional value
// for StddevFunction.invoke(List), preserving arity and the actual argument.
@Test void getAdjustedParametersForMethod() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { // StddevFunction.invoke(@ParameterName( "list" ) List<?> list) Method method = StddevFunction.class.getMethod("invoke", List.class); assertNotNull(method); Object actualValue = Arrays.asList(2, 4, 7, 5); Object[] parameters = {new NamedParameter("list", actualValue)}; Object[] retrieved = BaseFEELFunctionHelper.getAdjustedParametersForMethod(ctx, parameters, true, method); assertNotNull(retrieved); assertEquals(parameters.length, retrieved.length); assertEquals(actualValue, retrieved[0]); }
/**
 * Heuristically infers a podcast's release schedule from its past release dates
 * (most recent MAX_DATA_POINTS only) and predicts the next expected date.
 * Detection cascades from tightest to loosest cadence: daily, weekly, biweekly,
 * monthly (falling back to four-weekly when no single day-of-month dominates),
 * then fixed multi-day-per-week patterns (including Mon-Fri "weekdays"),
 * then weekly-with-exceptions, and finally UNKNOWN with a 0.6x-median-gap guess.
 * Each branch advances from the last release (normalized to the median release
 * hour) rather than using Calendar.set, to avoid skipping a period when the
 * last release straddled a week/month boundary.
 */
public static Guess performGuess(List<Date> releaseDates) { if (releaseDates.size() <= 1) { return new Guess(Schedule.UNKNOWN, null, null); } else if (releaseDates.size() > MAX_DATA_POINTS) { releaseDates = releaseDates.subList(releaseDates.size() - MAX_DATA_POINTS, releaseDates.size()); } Stats stats = getStats(releaseDates); final int maxTotalWrongDays = Math.max(1, releaseDates.size() / 5); final int maxSingleDayOff = releaseDates.size() / 10; GregorianCalendar last = new GregorianCalendar(); last.setTime(releaseDates.get(releaseDates.size() - 1)); last.set(Calendar.HOUR_OF_DAY, (int) stats.medianHour); last.set(Calendar.MINUTE, (int) ((stats.medianHour - Math.floor(stats.medianHour)) * 60)); last.set(Calendar.SECOND, 0); last.set(Calendar.MILLISECOND, 0); if (Math.abs(stats.medianDistance - ONE_DAY) < 2 * ONE_HOUR && stats.avgDeltaToMedianDistance < 2 * ONE_HOUR) { addTime(last, ONE_DAY); return new Guess(Schedule.DAILY, Arrays.asList(Calendar.MONDAY, Calendar.TUESDAY, Calendar.WEDNESDAY, Calendar.THURSDAY, Calendar.FRIDAY, Calendar.SATURDAY, Calendar.SUNDAY), last.getTime()); } else if (Math.abs(stats.medianDistance - ONE_WEEK) < ONE_DAY && stats.avgDeltaToMedianDistance < 2 * ONE_DAY) { // Just using last.set(Calendar.DAY_OF_WEEK) could skip a week // when the last release is delayed over week boundaries addTime(last, 3 * ONE_DAY); do { addTime(last, ONE_DAY); } while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek); return new Guess(Schedule.WEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime()); } else if (Math.abs(stats.medianDistance - 2 * ONE_WEEK) < ONE_DAY && stats.avgDeltaToMedianDistance < 2 * ONE_DAY) { // Just using last.set(Calendar.DAY_OF_WEEK) could skip a week // when the last release is delayed over week boundaries addTime(last, 10 * ONE_DAY); do { addTime(last, ONE_DAY); } while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek); return new Guess(Schedule.BIWEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime()); 
} else if (Math.abs(stats.medianDistance - ONE_MONTH) < 5 * ONE_DAY && stats.avgDeltaToMedianDistance < 5 * ONE_DAY) { if (stats.daysOfMonth[stats.mostOftenDayOfMonth] >= releaseDates.size() - maxTotalWrongDays) { // Just using last.set(Calendar.DAY_OF_MONTH) could skip a week // when the last release is delayed over week boundaries addTime(last, 2 * ONE_WEEK); do { addTime(last, ONE_DAY); } while (last.get(Calendar.DAY_OF_MONTH) != stats.mostOftenDayOfMonth); return new Guess(Schedule.MONTHLY, null, last.getTime()); } addTime(last, 3 * ONE_WEEK + 3 * ONE_DAY); do { addTime(last, ONE_DAY); } while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek); return new Guess(Schedule.FOURWEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime()); } // Find release days List<Integer> largeDays = new ArrayList<>(); for (int i = Calendar.SUNDAY; i <= Calendar.SATURDAY; i++) { if (stats.daysOfWeek[i] > maxSingleDayOff) { largeDays.add(i); } } // Ensure that all release days are used similarly often int averageDays = releaseDates.size() / largeDays.size(); boolean matchesAverageDays = true; for (int day : largeDays) { if (stats.daysOfWeek[day] < averageDays - maxSingleDayOff) { matchesAverageDays = false; break; } } if (matchesAverageDays && stats.medianDistance < ONE_WEEK) { // Fixed daily release schedule (eg Mo, Thu, Fri) addUntil(last, largeDays); if (largeDays.size() == 5 && largeDays.containsAll(Arrays.asList( Calendar.MONDAY, Calendar.TUESDAY, Calendar.WEDNESDAY, Calendar.THURSDAY, Calendar.FRIDAY))) { return new Guess(Schedule.WEEKDAYS, largeDays, last.getTime()); } return new Guess(Schedule.SPECIFIC_DAYS, largeDays, last.getTime()); } else if (largeDays.size() == 1) { // Probably still weekly with more exceptions than others addUntil(last, largeDays); return new Guess(Schedule.WEEKLY, largeDays, last.getTime()); } addTime(last, (long) (0.6f * stats.medianDistance)); return new Guess(Schedule.UNKNOWN, null, last.getTime()); }
@Test public void testUnknown() { ArrayList<Date> releaseDates = new ArrayList<>(); releaseDates.add(makeDate("2024-01-01 16:30")); releaseDates.add(makeDate("2024-01-03 16:30")); releaseDates.add(makeDate("2024-01-03 16:31")); releaseDates.add(makeDate("2024-01-04 16:30")); releaseDates.add(makeDate("2024-01-04 16:31")); releaseDates.add(makeDate("2024-01-07 16:30")); releaseDates.add(makeDate("2024-01-07 16:31")); releaseDates.add(makeDate("2024-01-10 16:30")); ReleaseScheduleGuesser.Guess guess = performGuess(releaseDates); assertEquals(ReleaseScheduleGuesser.Schedule.UNKNOWN, guess.schedule); assertClose(makeDate("2024-01-12 16:30"), guess.nextExpectedDate, 2 * ONE_DAY); }
public static SupportLevel defaultSupportLevel(String firstVersion, String currentVersion) { if (firstVersion == null || firstVersion.isEmpty()) { throw new IllegalArgumentException( "FirstVersion is not specified. This can be done in @UriEndpoint or in pom.xml file."); } // we only want major/minor (strip patch) Version v1 = new Version(firstVersion); v1 = new Version(v1.getMajor() + "." + v1.getMinor()); Version v2 = new Version(currentVersion); v2 = new Version(v2.getMajor() + "." + v2.getMinor()); boolean justNew = CamelVersionHelper.isGE(v2.toString(), v1.toString()); boolean prevNew = CamelVersionHelper.isGE(CamelVersionHelper.prevMinor(v2.toString()), v1.toString()); if (justNew || prevNew) { // its a new component (2 releases back) that is added to this version so lets mark it as preview by default return SupportLevel.Preview; } else { return SupportLevel.Stable; } }
@Test public void testPreview() { Assertions.assertEquals(SupportLevel.Preview, SupportLevelHelper.defaultSupportLevel("3.19.0", "3.20.0")); Assertions.assertEquals(SupportLevel.Preview, SupportLevelHelper.defaultSupportLevel("3.19.0", "3.20.1")); Assertions.assertEquals(SupportLevel.Preview, SupportLevelHelper.defaultSupportLevel("3.19.1", "3.20.1")); Assertions.assertEquals(SupportLevel.Preview, SupportLevelHelper.defaultSupportLevel("3.19.1", "3.20.2")); Assertions.assertNotEquals(SupportLevel.Preview, SupportLevelHelper.defaultSupportLevel("3.19.0", "3.21.0")); }
public synchronized void fenceProducer() { verifyNotClosed(); verifyNotFenced(); verifyTransactionsInitialized(); this.producerFenced = true; }
@Test public void shouldThrowFenceProducerIfTransactionsNotInitialized() { buildMockProducer(true); assertThrows(IllegalStateException.class, () -> producer.fenceProducer()); }
@Override public ListenableFuture<?> execute(StartTransaction statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters) { Session session = stateMachine.getSession(); if (!session.isClientTransactionSupport()) { throw new PrestoException(StandardErrorCode.INCOMPATIBLE_CLIENT, "Client does not support transactions"); } if (session.getTransactionId().isPresent()) { throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, "Nested transactions not supported"); } Optional<IsolationLevel> isolationLevel = extractIsolationLevel(statement); Optional<Boolean> readOnly = extractReadOnly(statement); TransactionId transactionId = transactionManager.beginTransaction( isolationLevel.orElse(TransactionManager.DEFAULT_ISOLATION), readOnly.orElse(TransactionManager.DEFAULT_READ_ONLY), false); stateMachine.setStartedTransactionId(transactionId); // Since the current session does not contain this new transaction ID, we need to manually mark it as inactive // when this statement completes. transactionManager.trySetInactive(transactionId); return immediateFuture(null); }
@Test public void testStartTransaction() { Session session = sessionBuilder() .setClientTransactionSupport() .build(); TransactionManager transactionManager = createTestTransactionManager(); QueryStateMachine stateMachine = createQueryStateMachine("START TRANSACTION", session, true, transactionManager, executor, metadata); assertFalse(stateMachine.getSession().getTransactionId().isPresent()); StartTransactionTask startTransactionTask = new StartTransactionTask(); getFutureValue(startTransactionTask.execute(new StartTransaction(ImmutableList.of()), transactionManager, metadata, new AllowAllAccessControl(), stateMachine, emptyList())); assertFalse(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId()); assertTrue(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().isPresent()); assertEquals(transactionManager.getAllTransactionInfos().size(), 1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().get()); assertFalse(transactionInfo.isAutoCommitContext()); }
public void setServiceName(String serviceName) { this.serviceName = serviceName; }
@Test void testSetServiceName() { String serviceName = "nacos"; metadataOperation.setServiceName(serviceName); assertEquals(metadataOperation.getServiceName(), serviceName); }
@NonNull private static VariableSpace getSpace( @NonNull ConnectionDetails connectionDetails ) { VariableSpace space = connectionDetails.getSpace(); return space != null ? space : Variables.getADefaultVariableSpace(); }
@Test public void testGetResolvedRootPathSubstitutesVariablesWithDefaultSpace() throws KettleException { when( vfsConnectionDetails.getSpace() ).thenReturn( null ); // Default variable space is functional and has no variables, so no substitution actually occurs. assertGetResolvedRootPath( "root/${0}/path", "root/${0}/path", "root/${0}/path" ); }
public static Method getMostSpecificMethod(Method method, Class<?> targetClass) { if (targetClass != null && targetClass != method.getDeclaringClass() && isOverridable(method, targetClass)) { try { if (Modifier.isPublic(method.getModifiers())) { try { return targetClass.getMethod(method.getName(), method.getParameterTypes()); } catch (NoSuchMethodException ex) { return method; } } else { return method; } } catch (SecurityException ex) { // Security settings are disallowing reflective access; fall back to 'method' below. } } return method; }
@Test public void testGetMostSpecificPublicMethod() throws NoSuchMethodException { Method method = Map.class.getDeclaredMethod("remove", Object.class, Object.class); Method specificMethod = ClassUtils.getMostSpecificMethod(method, HashMap.class); assertEquals(HashMap.class.getDeclaredMethod("remove", Object.class, Object.class), specificMethod); }
@Override public CiConfiguration loadConfiguration() { String revision = system.envVariable(PROPERTY_COMMIT); if (isEmpty(revision)) { LOG.warn("Missing environment variable " + PROPERTY_COMMIT); } String githubRepository = system.envVariable(GITHUB_REPOSITORY_ENV_VAR); String githubApiUrl = system.envVariable(GITHUB_API_URL_ENV_VAR); if (isEmpty(githubRepository) || isEmpty(githubApiUrl)) { LOG.warn("Missing or empty environment variables: {}, and/or {}", GITHUB_API_URL_ENV_VAR, GITHUB_REPOSITORY_ENV_VAR); return new CiConfigurationImpl(revision, getName()); } return new CiConfigurationImpl(revision, getName(), new DevOpsPlatformInfo(githubApiUrl, githubRepository)); }
@Test public void loadConfiguration() { setEnvVariable("GITHUB_ACTION", "build"); setEnvVariable("GITHUB_SHA", "abd12fc"); setEnvVariable("GITHUB_API_URL", GITHUB_API_URL); setEnvVariable("GITHUB_REPOSITORY", REPOSITORY); CiConfiguration configuration = underTest.loadConfiguration(); assertThat(configuration.getScmRevision()).hasValue("abd12fc"); checkDevOpsPlatformInfo(configuration); }