focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Transforms a Camel {@code Message} into a CloudEvents v1.0 JSON envelope.
 * CloudEvent attributes are collected from matching message headers, missing
 * mandatory attributes are defaulted, the current body becomes the "data"
 * field, and the consumed headers are removed afterwards.
 */
@Override public void transform(Message message, DataType fromType, DataType toType) {
    final Map<String, Object> headers = message.getHeaders();
    Map<String, Object> cloudEventAttributes = new HashMap<>();
    CloudEvent cloudEvent = CloudEvents.v1_0;
    // Copy each CloudEvent attribute present as a Camel header, keyed by its JSON attribute name.
    for (CloudEvent.Attribute attribute : cloudEvent.attributes()) {
        if (headers.containsKey(attribute.id())) {
            cloudEventAttributes.put(attribute.json(), headers.get(attribute.id()));
        }
    }
    // Default the mandatory attributes that were not supplied via headers.
    cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_VERSION).json(), cloudEvent.version());
    cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).json(), message.getExchange().getExchangeId());
    cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).json(), CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_TYPE);
    cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).json(), CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_SOURCE);
    cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TIME).json(), cloudEvent.getEventTime(message.getExchange()));
    // The existing body becomes the CloudEvent "data" payload, read as a string.
    String body = MessageHelper.extractBodyAsString(message);
    cloudEventAttributes.putIfAbsent("data", body);
    cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE).json(), headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_CONTENT_TYPE, "application/json"));
    headers.put(Exchange.CONTENT_TYPE, "application/cloudevents+json");
    // NOTE(review): "createCouldEventJsonObject" looks like a typo for "createCloudEventJsonObject";
    // the method is declared elsewhere, so it cannot be renamed from this block alone.
    message.setBody(createCouldEventJsonObject(cloudEventAttributes));
    // Remove the now-consumed CloudEvent headers from the message.
    cloudEvent.attributes().stream().map(CloudEvent.Attribute::id).forEach(headers::remove);
}
/** Verifies that transform() fills in default id/type/source/content-type attributes and serializes the body as "data". */
@Test void shouldSetDefaultCloudEventAttributes() throws Exception { Exchange exchange = new DefaultExchange(camelContext); exchange.getMessage().setBody(new ByteArrayInputStream("Test".getBytes(StandardCharsets.UTF_8))); transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY); CloudEvent cloudEvent = CloudEvents.v1_0; assertTrue(exchange.getMessage().hasHeaders()); assertEquals("application/cloudevents+json", exchange.getMessage().getHeader(Exchange.CONTENT_TYPE)); assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"%s\"", cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).json(), exchange.getExchangeId()))); assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"org.apache.camel.event\"", cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).json()))); assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"org.apache.camel\"", cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).json()))); assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"application/json\"", cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE).json()))); assertTrue(exchange.getMessage().getBody(String.class).contains("\"data\":\"Test\"")); }
/** Declares that this handler accepts plugin-settings change notifications; always {@code true}. */
@Override public boolean supportsPluginSettingsNotification() { return true; }
/**
 * Verifies that the handler reports support for plugin-settings notifications.
 * Renamed from "shouldNotSupportSettingsNotification": the old name contradicted
 * the assertion, which expects {@code supportsPluginSettingsNotification()} to be true.
 */
@Test public void shouldSupportSettingsNotification() throws Exception {
    assertTrue(messageHandler.supportsPluginSettingsNotification());
}
/**
 * Returns (creating if necessary) the sensor registered for the given
 * thread/task pair under the supplied suffix and recording level.
 */
public final Sensor taskLevelSensor(final String threadId, final String taskId, final String sensorSuffix, final RecordingLevel recordingLevel, final Sensor... parents) {
    // Key prefix uniquely identifying this thread/task combination.
    final String prefix = taskSensorPrefix(threadId, taskId);
    // Serialize lookups/creations against the shared task-level registry.
    synchronized (taskLevelSensors) {
        return getSensors(taskLevelSensors, sensorSuffix, prefix, recordingLevel, parents);
    }
}
/** Verifies that taskLevelSensor() returns the pre-registered sensor rather than creating a new one. */
@Test public void shouldGetExistingTaskLevelSensor() { final Metrics metrics = mock(Metrics.class); final RecordingLevel recordingLevel = RecordingLevel.INFO; setupGetExistingSensorTest(metrics); final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time); final Sensor actualSensor = streamsMetrics.taskLevelSensor( THREAD_ID1, TASK_ID1, SENSOR_NAME_1, recordingLevel ); assertThat(actualSensor, is(equalToObject(sensor))); }
/**
 * Opens the queue: acquires exclusive access to the backing directory, loads
 * the queue pages, then flips the closed flag. Throws if already open; on any
 * failure while opening pages the directory lock is released before rethrowing.
 *
 * @throws IOException if the queue is already open or page loading fails
 */
public void open() throws IOException {
    if (!this.closed.get()) {
        throw new IOException("queue already opened");
    }
    lock.lock();
    try {
        try {
            // verify exclusive access to the dirPath
            this.dirLock = FileLockFactory.obtainLock(this.dirPath, LOCK_NAME);
        } catch (LockException e) {
            // NOTE(review): only the message of the original exception is propagated, the
            // cause chain is dropped — if LockException has a (String, Throwable) constructor,
            // chaining `e` here would preserve the stack trace. Confirm before changing.
            throw new LockException("The queue failed to obtain exclusive access, cause: " + e.getMessage());
        }
        try {
            openPages();
            this.closed.set(false);
        } catch (IOException e) {
            // upon any exception while opening the queue and after dirlock has been obtained
            // we need to make sure to release the dirlock. Calling the close method on a partially
            // open queue has no effect because the closed flag is still true.
            releaseLockAndSwallow();
            throw(e);
        }
    } finally {
        lock.unlock();
    }
}
/** Verifies that opening a queue whose max size exceeds available disk space fails with IOException. */
@Test(expected = IOException.class) public void throwsWhenNotEnoughDiskFree() throws Exception { Settings settings = SettingsImpl.builder(TestSettings.persistedQueueSettings(100, dataPath)) .queueMaxBytes(Long.MAX_VALUE) .build(); try (Queue queue = new Queue(settings)) { queue.open(); } }
/**
 * Truncates the index starting at the slot that holds {@code logIndex}.
 * Returns 0 without modification when the index is outside the stored range.
 */
@Override
public int truncate(final long logIndex, final int pos) {
    this.writeLock.lock();
    try {
        final long first = this.header.getFirstLogIndex();
        final long last = this.header.getLastLogIndex();
        // Nothing to truncate when the requested index falls outside [first, last].
        if (logIndex < first || logIndex > last) {
            return 0;
        }
        // Translate the absolute log index into a slot offset and truncate from there.
        return truncateToSlot((int) (logIndex - first));
    } finally {
        this.writeLock.unlock();
    }
}
/** Verifies truncate() trims the index to the entry before the given log index and that the result survives shutdown/recover. */
@Test public void testTruncate() { // Append 10 index entry for (int idx = 1; idx <= 10; idx++) { this.offsetIndex.appendIndex(idx, idx, segmentIndex); } // Check truncate to 9 { this.offsetIndex.truncate(9, 0); assertEquals(8, this.offsetIndex.getLastLogIndex()); } // Check truncate to 5 { this.offsetIndex.truncate(5, 0); // Test recover this.offsetIndex.shutdown(1000); this.init(); this.offsetIndex.recover(); assertEquals(4, this.offsetIndex.getLastLogIndex()); } }
/**
 * Checks a transaction input's scriptSig against standardness rules: every data
 * push must use the shortest possible encoding, and anything that parses as a
 * DER signature must be canonically encoded (both at the byte level and as an
 * ECDSA signature). Returns the first violation found, or NONE.
 */
// NOTE(review): decodeFromDER is invoked for every push-data chunk; whether chunk.data
// can be null for bare push opcodes here is not visible from this block — confirm upstream.
public static RuleViolation isInputStandard(TransactionInput input) { for (ScriptChunk chunk : input.getScriptSig().chunks()) { if (chunk.data != null && !chunk.isShortestPossiblePushData()) return RuleViolation.SHORTEST_POSSIBLE_PUSHDATA; if (chunk.isPushData()) { ECDSASignature signature; try { signature = ECKey.ECDSASignature.decodeFromDER(chunk.data); } catch (SignatureDecodeException x) { // Doesn't look like a signature. signature = null; } if (signature != null) { if (!TransactionSignature.isEncodingCanonical(chunk.data)) return RuleViolation.SIGNATURE_CANONICAL_ENCODING; if (!signature.isCanonical()) return RuleViolation.SIGNATURE_CANONICAL_ENCODING; } } } return RuleViolation.NONE; }
/** Verifies a canonical signature passes and a zero-byte-padded (uncanonical) one is flagged. */
@Test public void canonicalSignature() { TransactionSignature sig = TransactionSignature.dummy(); Script scriptOk = ScriptBuilder.createInputScript(sig); assertEquals(RuleViolation.NONE, DefaultRiskAnalysis.isInputStandard(new TransactionInput(null, scriptOk.program(), TransactionOutPoint.UNCONNECTED))); byte[] sigBytes = sig.encodeToBitcoin(); // Appending a zero byte makes the signature uncanonical without violating DER encoding. Script scriptUncanonicalEncoding = new ScriptBuilder().data(Arrays.copyOf(sigBytes, sigBytes.length + 1)) .build(); assertEquals(RuleViolation.SIGNATURE_CANONICAL_ENCODING, DefaultRiskAnalysis.isInputStandard( new TransactionInput(null, scriptUncanonicalEncoding.program(), TransactionOutPoint.UNCONNECTED))); }
/**
 * Returns whether the plugin is bundled with a commercial edition: it must be
 * published by the SonarSource organization and carry one of the SonarSource
 * commercial licenses (all comparisons are case-insensitive).
 */
public static boolean isEditionBundled(Plugin plugin) {
    // Must be published by SonarSource before the license even matters.
    if (!SONARSOURCE_ORGANIZATION.equalsIgnoreCase(plugin.getOrganization())) {
        return false;
    }
    return Arrays.stream(SONARSOURCE_COMMERCIAL_LICENSES)
            .anyMatch(license -> license.equalsIgnoreCase(plugin.getLicense()));
}
/** Verifies a plugin with a SonarSource-ish license but a random organization is not edition-bundled. */
@Test public void isEditionBundled_on_Plugin_returns_false_for_license_SonarSource_and_non_SonarSource_organization() { Plugin plugin = newPlugin(randomAlphanumeric(3), randomizeCase("SonarSource")); assertThat(EditionBundledPlugins.isEditionBundled(plugin)).isFalse(); }
/**
 * Builds an OpenGauss error-response packet from an exception: uses the server
 * error message when one is embedded in the cause, otherwise converts the
 * exception to a SQLException and wraps that.
 */
public static OpenGaussErrorResponsePacket newInstance(final Exception cause) { Optional<ServerErrorMessage> serverErrorMessage = findServerErrorMessage(cause); return serverErrorMessage.map(OpenGaussErrorResponsePacket::new).orElseGet(() -> createErrorResponsePacket(SQLExceptionTransformEngine.toSQLException(cause, DATABASE_TYPE))); }
/** Verifies that a SQLException is mapped into severity/code/message/error-code packet fields. */
@Test void assertNewInstanceWithSQLException() { SQLException cause = new SQLException("database \"test\" does not exist", "3D000", null); OpenGaussErrorResponsePacket actual = OpenGaussErrorPacketFactory.newInstance(cause); Map<Character, String> actualFields = getFieldsInPacket(actual); assertThat(actualFields.size(), is(4)); assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_SEVERITY), is("ERROR")); assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_CODE), is("3D000")); assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_MESSAGE), is("database \"test\" does not exist")); assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_ERROR_CODE), is("0")); }
/**
 * Checks whether the current principal owns the given transaction; when it does
 * not and both authentication and authorization are enabled, falls back to a
 * super-user check. Continuations run on the channel's event-loop executor.
 */
private CompletableFuture<Boolean> verifyTxnOwnership(TxnID txnID) { assert ctx.executor().inEventLoop(); return service.pulsar().getTransactionMetadataStoreService() .verifyTxnOwnership(txnID, getPrincipal()) .thenComposeAsync(isOwner -> { if (isOwner) { return CompletableFuture.completedFuture(true); } if (service.isAuthenticationEnabled() && service.isAuthorizationEnabled()) { return isSuperUser(); } else { return CompletableFuture.completedFuture(false); } }, ctx.executor()); }
/** Verifies that a failed subscription endTxn surfaces the server error message in the response while keeping txn identifiers intact. */
@Test(timeOut = 30000) public void sendEndTxnOnSubscriptionFailed() throws Exception { final TransactionMetadataStoreService txnStore = mock(TransactionMetadataStoreService.class); when(txnStore.getTxnMeta(any())).thenReturn(CompletableFuture.completedFuture(mock(TxnMeta.class))); when(txnStore.verifyTxnOwnership(any(), any())).thenReturn(CompletableFuture.completedFuture(true)); when(txnStore.endTransaction(any(TxnID.class), anyInt(), anyBoolean())) .thenReturn(CompletableFuture.completedFuture(null)); when(pulsar.getTransactionMetadataStoreService()).thenReturn(txnStore); svcConfig.setTransactionCoordinatorEnabled(true); resetChannel(); setChannelConnected(); Topic topic = mock(Topic.class); final org.apache.pulsar.broker.service.Subscription sub = mock(org.apache.pulsar.broker.service.Subscription.class); doReturn(sub).when(topic).getSubscription(any()); doReturn(CompletableFuture.failedFuture(new RuntimeException("server error"))) .when(sub).endTxn(anyLong(), anyLong(), anyInt(), anyLong()); doReturn(CompletableFuture.completedFuture(Optional.of(topic))).when(brokerService) .getTopicIfExists(any(String.class)); ByteBuf clientCommand = Commands.newEndTxnOnSubscription(89L, 1L, 12L, successTopicName, successSubName, TxnAction.COMMIT, 1L); channel.writeInbound(clientCommand); CommandEndTxnOnSubscriptionResponse response = (CommandEndTxnOnSubscriptionResponse) getResponse(); assertEquals(response.getRequestId(), 89L); assertEquals(response.getTxnidLeastBits(), 1L); assertEquals(response.getTxnidMostBits(), 12L); assertEquals(response.getError().getValue(), 0); assertEquals(response.getMessage(), "Handle end txn on subscription failed: server error"); channel.finish(); }
/**
 * Returns the 1-based bucket number of {@code operand} in an equi-width
 * histogram over [bound1, bound2] with {@code bucketCount} buckets. Values
 * below the range map to 0 and values at/above the upper edge map to
 * bucketCount + 1. When bound1 > bound2 the numbering is mirrored.
 */
@Description("The bucket number of a value given a lower and upper bound and the number of buckets") @ScalarFunction("width_bucket") @SqlType(StandardTypes.BIGINT) public static long widthBucket(@SqlType(StandardTypes.DOUBLE) double operand, @SqlType(StandardTypes.DOUBLE) double bound1, @SqlType(StandardTypes.DOUBLE) double bound2, @SqlType(StandardTypes.BIGINT) long bucketCount) {
    checkCondition(bucketCount > 0, INVALID_FUNCTION_ARGUMENT, "bucketCount must be greater than 0");
    checkCondition(!isNaN(operand), INVALID_FUNCTION_ARGUMENT, "operand must not be NaN");
    checkCondition(isFinite(bound1), INVALID_FUNCTION_ARGUMENT, "first bound must be finite");
    checkCondition(isFinite(bound2), INVALID_FUNCTION_ARGUMENT, "second bound must be finite");
    checkCondition(bound1 != bound2, INVALID_FUNCTION_ARGUMENT, "bounds cannot equal each other");
    long result = 0;
    // Bucket arithmetic works on the ascending range; a descending bound order is handled by mirroring below.
    double lower = Math.min(bound1, bound2);
    double upper = Math.max(bound1, bound2);
    if (operand < lower) {
        result = 0;
    } else if (operand >= upper) {
        try {
            // Overflow-checked: bucketCount == Long.MAX_VALUE would wrap silently with plain addition.
            result = Math.addExact(bucketCount, 1);
        } catch (ArithmeticException e) {
            throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, format("Bucket for value %s is out of range", operand));
        }
    } else {
        // Linear interpolation into 1..bucketCount.
        result = (long) ((double) bucketCount * (operand - lower) / (upper - lower) + 1);
    }
    if (bound1 > bound2) {
        // Mirror the bucket number for descending bounds; this also swaps the
        // out-of-range results 0 and bucketCount + 1.
        result = (bucketCount - result) + 1;
    }
    return result;
}
/** Verifies width_bucket for ascending and descending bounds, boundary/infinite operands, and all argument-validation failures. */
@Test public void testWidthBucket() { assertFunction("width_bucket(3.14E0, 0, 4, 3)", BIGINT, 3L); assertFunction("width_bucket(2, 0, 4, 3)", BIGINT, 2L); assertFunction("width_bucket(infinity(), 0, 4, 3)", BIGINT, 4L); assertFunction("width_bucket(-1, 0, 3.2E0, 4)", BIGINT, 0L); // bound1 > bound2 is not symmetric with bound2 > bound1 assertFunction("width_bucket(3.14E0, 4, 0, 3)", BIGINT, 1L); assertFunction("width_bucket(2, 4, 0, 3)", BIGINT, 2L); assertFunction("width_bucket(infinity(), 4, 0, 3)", BIGINT, 0L); assertFunction("width_bucket(-1, 3.2E0, 0, 4)", BIGINT, 5L); // failure modes assertInvalidFunction("width_bucket(3.14E0, 0, 4, 0)", "bucketCount must be greater than 0"); assertInvalidFunction("width_bucket(3.14E0, 0, 4, -1)", "bucketCount must be greater than 0"); assertInvalidFunction("width_bucket(nan(), 0, 4, 3)", "operand must not be NaN"); assertInvalidFunction("width_bucket(3.14E0, -1, -1, 3)", "bounds cannot equal each other"); assertInvalidFunction("width_bucket(3.14E0, nan(), -1, 3)", "first bound must be finite"); assertInvalidFunction("width_bucket(3.14E0, -1, nan(), 3)", "second bound must be finite"); assertInvalidFunction("width_bucket(3.14E0, infinity(), -1, 3)", "first bound must be finite"); assertInvalidFunction("width_bucket(3.14E0, -1, infinity(), 3)", "second bound must be finite"); }
/**
 * Creates a conveyor over the supplied queues. At least one queue is required;
 * the queue array is validated and defensively copied.
 */
ConcurrentConveyor(E submitterGoneItem, QueuedPipe<E>... queues) {
    // Refuse construction when there is nothing to drain.
    if (queues.length == 0) {
        throw new IllegalArgumentException("No concurrent queues supplied");
    }
    this.liveQueueCount = queues.length;
    this.queues = validateAndCopy(queues);
    this.submitterGoneItem = submitterGoneItem;
}
/** Verifies that constructing a conveyor with zero queues throws IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class) public void mustPassSomeQueues() { concurrentConveyor(doneItem); }
/**
 * Assembles rs2asm source from the stream into a ScriptDefinition: lexes and
 * parses with ANTLR, fails fast on syntax errors, resolves labels in a first
 * tree walk, then emits the script in a second walk.
 *
 * @throws IOException if the input cannot be read
 * @throws RuntimeException on any syntax error reported by the lexer
 */
public ScriptDefinition assemble(InputStream in) throws IOException { // Get our lexer rs2asmLexer lexer = new rs2asmLexer(new ANTLRInputStream(new InputStreamReader(in, StandardCharsets.UTF_8))); LexerErrorListener errorListener = new LexerErrorListener(); lexer.addErrorListener(errorListener); // Get a list of matched tokens CommonTokenStream tokens = new CommonTokenStream(lexer); // Pass the tokens to the parser rs2asmParser parser = new rs2asmParser(tokens); // Specify our entry point ProgContext progContext = parser.prog(); if (errorListener.getErrors() > 0) { throw new RuntimeException("syntax error"); } // Walk it and attach our listener ParseTreeWalker walker = new ParseTreeWalker(); // walk through first and resolve labels LabelVisitor labelVisitor = new LabelVisitor(); walker.walk(labelVisitor, progContext); ScriptWriter listener = new ScriptWriter(instructions, labelVisitor); walker.walk(listener, progContext); return listener.buildScript(); }
/** Round-trip test: assembles a script resource, disassembles the result, and expects the original source back. */
@Test public void testAssemble() throws Exception { InputStream in = AssemblerTest.class.getResourceAsStream(script); Assert.assertNotNull(in); Instructions instructions = new Instructions(); instructions.init(); Assembler assembler = new Assembler(instructions); ScriptDefinition script = assembler.assemble(in); // compare with disassembler Disassembler disassembler = new Disassembler(); String out = disassembler.disassemble(script); in = AssemblerTest.class.getResourceAsStream(this.script); Assert.assertNotNull(in); String original = new String(IOUtils.toByteArray(in), StandardCharsets.UTF_8).replaceAll("\r\n", "\n"); logger.info(original); logger.info("-----------------------"); logger.info(out); Assert.assertEquals(original, out); }
/**
 * Extracts all named groups of the first match of {@code pattern} in
 * {@code content} as a name-to-matched-text map. Returns {@code null} when
 * either argument is null (kept for backward compatibility), and an empty map
 * when there is no match.
 */
// NOTE(review): relies on reflective access to Pattern's private "namedGroups" method;
// this may break under JDKs with stricter reflection access — confirm supported JDK range.
public static Map<String, String> getAllGroupNames(Pattern pattern, CharSequence content) { if (null == content || null == pattern) { return null; } final Matcher m = pattern.matcher(content); final Map<String, String> result = MapUtil.newHashMap(m.groupCount()); if (m.find()) { // 通过反射获取 namedGroups 方法 final Map<String, Integer> map = ReflectUtil.invoke(pattern, "namedGroups"); map.forEach((key, value) -> result.put(key, m.group(value))); } return result; }
/** Verifies named groups year/month/day are extracted from a dashed date string. */
@Test public void getAllGroupNamesTest() { final String content = "2021-10-11"; final String regex = "(?<year>\\d+)-(?<month>\\d+)-(?<day>\\d+)"; final Map<String, String> map = ReUtil.getAllGroupNames(PatternPool.get(regex, Pattern.DOTALL), content); assertEquals(map.get("year"), "2021"); assertEquals(map.get("month"), "10"); assertEquals(map.get("day"), "11"); }
/**
 * Creates a recording/replaying metastore wrapper: builds one cache per
 * metastore operation type from the client config, and when replay mode is on,
 * pre-loads a previously saved recording from the configured path.
 *
 * @throws IOException if the recording cannot be loaded in replay mode
 */
@Inject public RecordingHiveMetastore(@ForRecordingHiveMetastore ExtendedHiveMetastore delegate, MetastoreClientConfig metastoreClientConfig) throws IOException { this.delegate = requireNonNull(delegate, "delegate is null"); requireNonNull(metastoreClientConfig, "hiveClientConfig is null"); this.recordingPath = requireNonNull(metastoreClientConfig.getRecordingPath(), "recordingPath is null"); this.replay = metastoreClientConfig.isReplay(); databaseCache = createCache(metastoreClientConfig); tableCache = createCache(metastoreClientConfig); tableConstraintsCache = createCache(metastoreClientConfig); supportedColumnStatisticsCache = createCache(metastoreClientConfig); tableStatisticsCache = createCache(metastoreClientConfig); partitionStatisticsCache = createCache(metastoreClientConfig); allTablesCache = createCache(metastoreClientConfig); allViewsCache = createCache(metastoreClientConfig); partitionCache = createCache(metastoreClientConfig); partitionNamesCache = createCache(metastoreClientConfig); partitionNamesByFilterCache = createCache(metastoreClientConfig); partitionsByNamesCache = createCache(metastoreClientConfig); tablePrivilegesCache = createCache(metastoreClientConfig); roleGrantsCache = createCache(metastoreClientConfig); if (replay) { loadRecording(); } }
/** Records metastore interactions to a temp file, then replays them against an unimplemented delegate and re-validates. */
@Test public void testRecordingHiveMetastore() throws IOException { MetastoreClientConfig recordingHiveClientConfig = new MetastoreClientConfig() .setRecordingPath(File.createTempFile("recording_test", "json").getAbsolutePath()) .setRecordingDuration(new Duration(10, TimeUnit.MINUTES)); RecordingHiveMetastore recordingHiveMetastore = new RecordingHiveMetastore(new TestingHiveMetastore(), recordingHiveClientConfig); validateMetadata(recordingHiveMetastore); recordingHiveMetastore.dropDatabase(TEST_METASTORE_CONTEXT, "other_database"); recordingHiveMetastore.writeRecording(); MetastoreClientConfig replayingHiveClientConfig = recordingHiveClientConfig .setReplay(true); recordingHiveMetastore = new RecordingHiveMetastore(new UnimplementedHiveMetastore(), replayingHiveClientConfig); recordingHiveMetastore.loadRecording(); validateMetadata(recordingHiveMetastore); }
/**
 * Reflectively instantiates the contract wrapper class and deploys it with the
 * given binary and encoded constructor args. TransactionException is rethrown
 * as-is; all other reflection/deployment failures are wrapped in
 * RuntimeException.
 */
protected static <T extends Contract> T deploy( Class<T> type, Web3j web3j, Credentials credentials, ContractGasProvider contractGasProvider, String binary, String encodedConstructor, BigInteger value) throws RuntimeException, TransactionException { try { Constructor<T> constructor = type.getDeclaredConstructor( String.class, Web3j.class, Credentials.class, ContractGasProvider.class); constructor.setAccessible(true); // we want to use null here to ensure that "to" parameter on message is not populated T contract = constructor.newInstance(null, web3j, credentials, contractGasProvider); return create(contract, binary, encodedConstructor, value); } catch (TransactionException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } }
/** Verifies a deployed contract exposes the expected address and transaction receipt. */
@Test public void testDeploy() throws Exception { TransactionReceipt transactionReceipt = createTransactionReceipt(); Contract deployedContract = deployContract(transactionReceipt); assertEquals(ADDRESS, deployedContract.getContractAddress()); assertTrue(deployedContract.getTransactionReceipt().isPresent()); assertEquals(transactionReceipt, deployedContract.getTransactionReceipt().get()); }
/**
 * Creates the folder via the Brick Files API (path sent without the leading
 * delimiter) and returns the folder with server-reported attributes attached.
 * API failures are translated through the Brick exception mapper.
 */
@Override public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException { try { return folder.withAttributes( new BrickAttributesFinderFeature(session).toAttributes(new FoldersApi(new BrickApiClient(session)) .postFoldersPath(StringUtils.removeStart(folder.getAbsolute(), String.valueOf(Path.DELIMITER))))); } catch(ApiException e) { throw new BrickExceptionMappingService().map("Cannot create folder {0}", e, folder); } }
/** Verifies directory creation succeeds, a duplicate mkdir raises ConflictException, and cleanup deletes the directory. */
@Test public void testMakeDirectory() throws Exception { final Path directory = new BrickDirectoryFeature(session).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertThrows(ConflictException.class, () -> new BrickDirectoryFeature(session).mkdir(directory, new TransferStatus())); new BrickDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Invokes the user acknowledgement-commit callback once per partition with the
 * acknowledged offsets and any per-partition error. Callback exceptions are
 * logged and collected; the first one is rethrown (wrapped) after all
 * callbacks have run. The enteredCallback flag brackets each user call.
 */
// NOTE(review): offsetsCopy is an unmodifiable *view* of the key set, not a snapshot —
// it would reflect later mutations of the underlying map. Confirm callers never mutate
// acknowledgements after this point.
void onComplete(List<Map<TopicIdPartition, Acknowledgements>> acknowledgementsMapList) { final ArrayList<Throwable> exceptions = new ArrayList<>(); acknowledgementsMapList.forEach(acknowledgementsMap -> acknowledgementsMap.forEach((partition, acknowledgements) -> { Exception exception = null; if (acknowledgements.getAcknowledgeErrorCode() != null) { exception = acknowledgements.getAcknowledgeErrorCode().exception(); } Set<Long> offsets = acknowledgements.getAcknowledgementsTypeMap().keySet(); Set<Long> offsetsCopy = Collections.unmodifiableSet(offsets); enteredCallback = true; try { acknowledgementCommitCallback.onComplete(Collections.singletonMap(partition, offsetsCopy), exception); } catch (Throwable e) { LOG.error("Exception thrown by acknowledgement commit callback", e); exceptions.add(e); } finally { enteredCallback = false; } })); if (!exceptions.isEmpty()) { throw ConsumerUtils.maybeWrapAsKafkaException(exceptions.get(0), "Exception thrown by acknowledgement commit callback"); } }
/** Verifies that an INVALID_RECORD_STATE error code surfaces as InvalidRecordStateException for each acknowledged offset. */
@Test public void testInvalidRecord() throws Exception { Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(0L, AcknowledgeType.ACCEPT); acknowledgements.add(1L, AcknowledgeType.REJECT); acknowledgements.setAcknowledgeErrorCode(Errors.INVALID_RECORD_STATE); acknowledgementsMap.put(tip0, acknowledgements); acknowledgementCommitCallbackHandler.onComplete(Collections.singletonList(acknowledgementsMap)); TestUtils.retryOnExceptionWithTimeout(() -> { assertInstanceOf(InvalidRecordStateException.class, exceptionMap.get(tpo00)); assertInstanceOf(InvalidRecordStateException.class, exceptionMap.get(tpo01)); }); }
/**
 * Splits an SMPP message into segments carrying a National Language
 * Identifier UDH. A message that fits in one part gets a single-message NLI
 * header; otherwise each segment carries a concatenation (SAR) header plus the
 * NLI header. Messages longer than MAX_SEG_COUNT segments are silently
 * truncated to that many segments.
 */
@Override
public byte[][] split(byte[] message) {
    if (!isSplitRequired()) {
        // Single message: prepend UDH with only the NLI element.
        byte[] nliMessage = new byte[UDHIE_NLI_SINGLE_MSG_HEADER_REAL_LENGTH + message.length];
        nliMessage[0] = (byte) UDHIE_NLI_SINGLE_MSG_HEADER_LENGTH;
        nliMessage[1] = (byte) UDHIE_NLI_IDENTIFIER;
        nliMessage[2] = (byte) UDHIE_NLI_HEADER_LENGTH;
        nliMessage[3] = this.languageIdentifier;
        System.arraycopy(message, 0, nliMessage, 4, message.length);
        return new byte[][] { nliMessage };
    }
    int segmentLength = getSegmentLength();
    // determine how many messages
    int segmentNum = message.length / segmentLength;
    int messageLength = message.length;
    if (segmentNum > MAX_SEG_COUNT) {
        // this is too long, can't fit, so chop
        segmentNum = MAX_SEG_COUNT;
        messageLength = segmentNum * segmentLength;
    }
    if ((messageLength % segmentLength) > 0) {
        segmentNum++;
    }
    byte[][] segments = new byte[segmentNum][];
    int lengthOfData;
    // Same reference number on every segment ties the parts together for reassembly.
    byte refNum = getReferenceNumber();
    for (int i = 0; i < segmentNum; i++) {
        logger.debug("segment number = {}", i);
        // Last segment carries the remainder; all others are full.
        if (segmentNum - i == 1) {
            lengthOfData = messageLength - i * segmentLength;
        } else {
            lengthOfData = segmentLength;
        }
        logger.debug("Length of data = {}", lengthOfData);
        segments[i] = new byte[UDHIE_NLI_MULTI_MSG_HEADER_REAL_LENGTH + lengthOfData];
        logger.debug("segments[{}].length = {}", i, segments[i].length);
        segments[i][0] = UDHIE_NLI_MULTI_MSG_HEADER_LENGTH; // doesn't include itself, is header length
        // SAR identifier
        segments[i][1] = UDHIE_IDENTIFIER_SAR;
        // SAR length
        segments[i][2] = UDHIE_SAR_LENGTH;
        // DATAGRAM REFERENCE NUMBER
        segments[i][3] = refNum;
        // total number of segments
        segments[i][4] = (byte) segmentNum;
        // segment #
        segments[i][5] = (byte) (i + 1);
        // national language locking shift table, element identifier
        segments[i][6] = (byte) UDHIE_NLI_IDENTIFIER;
        segments[i][7] = (byte) UDHIE_NLI_HEADER_LENGTH;
        segments[i][8] = this.languageIdentifier;
        // now copy the data
        System.arraycopy(message, i * segmentLength, segments[i], UDHIE_NLI_MULTI_MSG_HEADER_REAL_LENGTH, lengthOfData);
    }
    return segments;
}
/** Verifies a 155-character Turkish message stays in one segment with the expected single-message NLI header bytes. */
@Test public void splitTurkishShortMessageWith155Character() { String message = "12345678901234567890123456789012345678901234567890123456789012345678901234567890" + "123456789012345678901234567890123456789012345678901234567890123456789012345"; // 155 single message byte turkishLanguageIdentifier = 0x01; SmppSplitter splitter = new SmppNLSTSplitter(message.length(), turkishLanguageIdentifier); SmppSplitter.resetCurrentReferenceNumber(); byte[][] result = splitter.split(message.getBytes()); assertEquals(1, result.length); assertArrayEquals( new byte[] { SmppNLSTSplitter.UDHIE_NLI_SINGLE_MSG_HEADER_LENGTH, SmppNLSTSplitter.UDHIE_NLI_IDENTIFIER, SmppNLSTSplitter.UDHIE_NLI_HEADER_LENGTH, turkishLanguageIdentifier, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 49, 50, 51, 52, 53 }, result[0]); }
/**
 * Scans the classpath under the given package (recursively) and returns every
 * class assignable to {@code requestClass}. Scan failures are logged and an
 * empty (or partial) set is returned rather than thrown.
 */
@Override public <T> Set<Class<T>> getSubTypesOf(String pkg, Class<T> requestClass) { Set<Class<T>> set = new HashSet<>(16); String packageSearchPath = ResourcePatternResolver.CLASSPATH_ALL_URL_PREFIX + ClassUtils.convertClassNameToResourcePath(pkg) + '/' + "**/*.class"; try { Resource[] resources = resourcePatternResolver.getResources(packageSearchPath); for (Resource resource : resources) { Class<?> scanClass = getClassByResource(resource); if (requestClass.isAssignableFrom(scanClass)) { set.add((Class<T>) scanClass); } } } catch (IOException | ClassNotFoundException e) { LOGGER.error("scan path: {} failed", packageSearchPath, e); } return set; }
/** Verifies package scanning finds exactly the three MockClass subtypes in the fixture package. */
@Test void testGetSubTypesOf() { packageScan = new DefaultPackageScan(); Set<Class<MockClass>> subTypesOf = packageScan.getSubTypesOf(AnnotationClass.class.getPackage().getName(), MockClass.class); assertEquals(3, subTypesOf.size()); }
/**
 * Looks up a declared method by name and parameter types, consulting a cache
 * first and caching the accessible Method on a hit. Falls back to searching
 * superclasses when the class itself does not declare the method; returns
 * empty when nothing matches or {@code clazz} is null.
 */
public static Optional<Method> findMethod(Class<?> clazz, String methodName, Class<?>[] paramsType) { if (clazz == null) { return Optional.empty(); } final String methodKey = buildMethodKey(clazz, methodName, paramsType); try { Method method = METHOD_CACHE.get(methodKey); if (method != null) { return Optional.of(method); } method = setObjectAccessible(clazz.getDeclaredMethod(methodName, paramsType)); METHOD_CACHE.put(methodKey, method); return Optional.of(method); } catch (NoSuchMethodException ex) { Optional<Method> method = findSuperClass(clazz, methodName, paramsType); if (method.isPresent()) { return method; } } return Optional.empty(); }
/** Verifies findMethod resolves existing methods (with and without params), misses unknown ones, and populates the method cache. */
@Test public void findMethod() { final Optional<Method> findMethod = ReflectUtils.findMethod(TestReflect.class, "findMethod", null); Assert.assertTrue(findMethod.isPresent()); final Optional<Method> test = ReflectUtils.findMethod(TestReflect.class, "test", null); Assert.assertFalse(test.isPresent()); final Optional<Method> findMethodWithParam = ReflectUtils .findMethod(TestReflect.class, "findMethodWithParam", new Class[]{String.class, int.class}); Assert.assertTrue(findMethodWithParam.isPresent()); final Optional<Object> reflect = ReflectUtils.buildWithConstructor(ReflectUtils.class, null, null); Assert.assertTrue(reflect.isPresent()); final Optional<Object> methodCache = ReflectUtils.getFieldValue(reflect.get(), "METHOD_CACHE"); Assert.assertTrue(methodCache.isPresent() && methodCache.get() instanceof Map); Assert.assertFalse(((Map<?, ?>) methodCache.get()).isEmpty()); }
/**
 * Updates an existing Grok pattern after validating it. Requires an id; the
 * update must match an existing document, in which case a
 * GrokPatternsUpdatedEvent is posted on the cluster bus. Any validation,
 * missing-id, or no-match condition raises ValidationException.
 */
@Override public GrokPattern update(GrokPattern pattern) throws ValidationException { try { if (!validate(pattern)) { throw new ValidationException("Invalid pattern " + pattern); } } catch (GrokException | PatternSyntaxException e) { throw new ValidationException("Invalid pattern " + pattern + "\n" + e.getMessage()); } if (pattern.id() == null) { throw new ValidationException("Invalid pattern " + pattern); } WriteResult<GrokPattern, ObjectId> result = dbCollection.update(DBQuery.is("_id", new ObjectId(pattern.id())), pattern); if (result.isUpdateOfExisting()) { clusterBus.post(GrokPatternsUpdatedEvent.create(ImmutableSet.of(pattern.name()))); return pattern; } throw new ValidationException("Invalid pattern " + pattern); }
/** Verifies update() succeeds for existing ids (posting one event each), and fails for a missing id and an unknown id without changing the collection size. */
@Test @MongoDBFixtures("MongoDbGrokPatternServiceTest.json") public void update() throws ValidationException { assertThat(collection.countDocuments()).isEqualTo(3); GrokPattern toUpdate1 = GrokPattern.builder() .id("56250da2d400000000000001") .name("Test1") .pattern("123") .build(); final GrokPattern updatedPattern1 = service.update(toUpdate1); assertThat(updatedPattern1.name()).matches(toUpdate1.name()); assertThat(updatedPattern1.pattern()).matches(toUpdate1.pattern()); assertThat(collection.countDocuments()).isEqualTo(3); GrokPattern toUpdate2 = GrokPattern.builder() .id("56250da2d400000000000001") .name("Testxxx") .pattern("123") .build(); final GrokPattern updatedPattern2 = service.update(toUpdate2); assertThat(updatedPattern2.name()).matches(toUpdate2.name()); assertThat(updatedPattern2.pattern()).matches(toUpdate2.pattern()); assertThat(collection.countDocuments()).isEqualTo(3); GrokPattern toUpdate3 = GrokPattern.builder() .name("Testxxx") .pattern("123") .build(); boolean thrown = false; try { service.update(toUpdate3); } catch (ValidationException e) { thrown = true; } assertThat(thrown).isTrue(); assertThat(collection.countDocuments()).isEqualTo(3); GrokPattern toUpdate4 = GrokPattern.builder() .id("56250da2d400000000000321") .name("Testxxx") .pattern("123") .build(); thrown = false; try { service.update(toUpdate4); } catch (ValidationException e) { thrown = true; } assertThat(thrown).isTrue(); assertThat(collection.countDocuments()).isEqualTo(3); verify(clusterEventBus, times(2)).post(any(GrokPatternsUpdatedEvent.class)); }
/** Static factory for a fresh {@link Builder}. */
public static Builder builder() { return new Builder(); }
/** Verifies building a MeasureTreeQuery without a strategy fails with NullPointerException. */
@Test void fail_when_no_strategy() { assertThatThrownBy(() -> { MeasureTreeQuery.builder() .build(); }) .isInstanceOf(NullPointerException.class); }
/**
 * Validates the path, then normalizes it: resolves "." and ".." segments,
 * collapses redundant separators, drops any trailing separator, and converts
 * separators to Unix style.
 *
 * @throws InvalidPathException if the path fails validation
 */
public static String cleanPath(String path) throws InvalidPathException {
    validatePath(path);
    final String normalized = FilenameUtils.normalizeNoEndSeparator(path);
    return FilenameUtils.separatorsToUnix(normalized);
}
/** Verifies cleanPath collapses slashes, strips trailing separators, resolves dot segments, and returns null when ".." escapes the root. */
@Test public void cleanPathNoException() throws InvalidPathException { // test clean path assertEquals("/foo/bar", PathUtils.cleanPath("/foo/bar")); // test trailing slash assertEquals("/foo/bar", PathUtils.cleanPath("/foo/bar/")); // test redundant slashes assertEquals("/foo/bar", PathUtils.cleanPath("/foo//bar")); assertEquals("/foo/bar", PathUtils.cleanPath("/foo//bar//")); assertEquals("/foo/bar", PathUtils.cleanPath("/foo///////bar//////")); // test dots gets resolved assertEquals("/foo/bar", PathUtils.cleanPath("/foo/./bar")); assertEquals("/foo/bar", PathUtils.cleanPath("/foo/././bar")); assertEquals("/foo", PathUtils.cleanPath("/foo/bar/..")); assertEquals("/bar", PathUtils.cleanPath("/foo/../bar")); assertEquals("/", PathUtils.cleanPath("/foo/bar/../..")); // the following seems strange // TODO(jiri): Instead of returning null, throw InvalidPathException. assertNull(PathUtils.cleanPath("/foo/bar/../../..")); }
/**
 * Returns the configured client expiration time.
 *
 * @return the client expired time value held by this config
 */
public long getClientExpiredTime() {
    return clientExpiredTime;
}
// Publishing a ServerConfigChangeEvent must cause the client config to pick up
// the new expired-time property from the environment.
// NOTE(review): relies on a fixed 1s sleep for the async event to be handled,
// which can be flaky on slow machines — consider an awaitility-style poll.
@Test
void testUpgradeConfig() throws InterruptedException {
    mockEnvironment.setProperty(ClientConstants.CLIENT_EXPIRED_TIME_CONFIG_KEY, String.valueOf(EXPIRED_TIME));
    NotifyCenter.publishEvent(ServerConfigChangeEvent.newEvent());
    // Wait for the asynchronous event subscriber to apply the change.
    TimeUnit.SECONDS.sleep(1);
    assertEquals(EXPIRED_TIME, clientConfig.getClientExpiredTime());
}
/**
 * Persists an API access log: maps the request DTO onto the persistence
 * entity and inserts it via the mapper.
 *
 * @param createDTO the access-log creation request to store
 */
@Override
public void createApiAccessLog(ApiAccessLogCreateReqDTO createDTO) {
    ApiAccessLogDO apiAccessLog = BeanUtils.toBean(createDTO, ApiAccessLogDO.class);
    apiAccessLogMapper.insert(apiAccessLog);
}
// A created access log must be persisted with all DTO fields copied onto the entity.
@Test
public void testCreateApiAccessLog() {
    // 准备参数 (prepare parameters)
    ApiAccessLogCreateReqDTO createDTO = randomPojo(ApiAccessLogCreateReqDTO.class);

    // 调用 (invoke)
    apiAccessLogService.createApiAccessLog(createDTO);
    // 断言 (assert: the single inserted row matches the DTO)
    ApiAccessLogDO apiAccessLogDO = apiAccessLogMapper.selectOne(null);
    assertPojoEquals(createDTO, apiAccessLogDO);
}
/**
 * Parses an API string with strict (non-lenient) semantics.
 * Convenience overload delegating to {@code parse(api, false)}.
 *
 * @param api the API string to parse
 * @return the parsed {@code Api}
 */
@VisibleForTesting
public static Api parse(String api) {
    return parse(api, false);
}
// Every known-unparseable API string must be rejected with IllegalArgumentException.
@Test
public void parseApi_badInputs() {
    // TODO(b/223670489): would be nice to use expectThrows() here
    for (String badApi : UNPARSEABLE_APIS) {
        assertThrows(
                "Api.parse(\"" + badApi + "\")",
                IllegalArgumentException.class,
                () -> Api.parse(badApi));
    }
}
/**
 * Creates a new {@link Builder} for assembling a custom configuration.
 *
 * @return a new, empty builder
 */
public static Builder custom() {
    return new Builder();
}
// Passing a null refresh period to the builder must fail with an NPE
// carrying the documented message.
@Test
public void builderRefreshPeriodIsNull() throws Exception {
    exception.expect(NullPointerException.class);
    exception.expectMessage(REFRESH_PERIOD_MUST_NOT_BE_NULL);
    RateLimiterConfig.custom()
            .limitRefreshPeriod(null);
}
/**
 * Fits an HMM by supervised maximum-likelihood estimation from fully labeled
 * sequences: initial-state, transition, and emission probabilities are
 * estimated by counting and then normalizing.
 *
 * @param observations observation (symbol) sequences
 * @param labels       per-element state labels, parallel to {@code observations}
 * @return the fitted HMM
 * @throws IllegalArgumentException if the sequence counts differ, or any
 *         observation/label pair has mismatched lengths
 */
public static HMM fit(int[][] observations, int[][] labels) {
    if (observations.length != labels.length) {
        throw new IllegalArgumentException("The number of observation sequences and that of label sequences are different.");
    }

    int N = 0; // the number of states
    int M = 0; // the number of symbols

    // First pass: validate lengths and discover the state/symbol alphabet sizes
    // from the maximum label and symbol values seen.
    for (int i = 0; i < observations.length; i++) {
        if (observations[i].length != labels[i].length) {
            throw new IllegalArgumentException(String.format("The length of observation sequence %d and that of corresponding label sequence are different.", i));
        }

        N = Math.max(N, MathEx.max(labels[i]) + 1);
        M = Math.max(M, MathEx.max(observations[i]) + 1);
    }

    double[] pi = new double[N];      // initial-state counts
    double[][] a = new double[N][N];  // transition counts a[from][to]
    double[][] b = new double[N][M];  // emission counts b[state][symbol]

    // Second pass: accumulate counts. The first element of each sequence
    // contributes to pi; subsequent elements contribute transitions.
    for (int i = 0; i < observations.length; i++) {
        pi[labels[i][0]]++;
        b[labels[i][0]][observations[i][0]]++;
        for (int j = 1; j < observations[i].length; j++) {
            a[labels[i][j - 1]][labels[i][j]]++;
            b[labels[i][j]][observations[i][j]]++;
        }
    }

    // Normalize counts into probability distributions (each row sums to 1).
    MathEx.unitize1(pi);
    for (int i = 0; i < N; i++) {
        MathEx.unitize1(a[i]);
        MathEx.unitize1(b[i]);
    }

    return new HMM(pi, Matrix.of(a), Matrix.of(b));
}
// Generates 5000 labeled sequences from known pi/a/b distributions, fits an
// HMM on them, and checks the recovered parameters against precomputed
// expected values (seeded RNG makes the run repeatable).
@Test
public void testFit() {
    System.out.println("fit");
    MathEx.setSeed(19650218); // to get repeatable results.

    // Samplers for the ground-truth initial, transition, and emission distributions.
    EmpiricalDistribution initial = new EmpiricalDistribution(pi);
    EmpiricalDistribution[] transition = new EmpiricalDistribution[a.length];
    for (int i = 0; i < transition.length; i++) {
        transition[i] = new EmpiricalDistribution(a[i]);
    }

    EmpiricalDistribution[] emission = new EmpiricalDistribution[b.length];
    for (int i = 0; i < emission.length; i++) {
        emission[i] = new EmpiricalDistribution(b[i]);
    }

    // Sample sequences of random length (30..150 in steps of 30) with labels.
    int[][] sequences = new int[5000][];
    int[][] labels = new int[5000][];
    for (int i = 0; i < sequences.length; i++) {
        sequences[i] = new int[30 * (MathEx.randomInt(5) + 1)];
        labels[i] = new int[sequences[i].length];
        int state = (int) initial.rand();
        sequences[i][0] = (int) emission[state].rand();
        labels[i][0] = state;
        for (int j = 1; j < sequences[i].length; j++) {
            state = (int) transition[state].rand();
            sequences[i][j] = (int) emission[state].rand();
            labels[i][j] = state;
        }
    }

    HMM model = HMM.fit(sequences, labels);
    System.out.println(model);

    // Expected estimates for the seeded sample.
    double[] expPi2 = {0.5076, 0.4924};
    double[][] expA2 = {{0.8002, 0.1998}, {0.1987, 0.8013}};
    double[][] expB2 = {{0.5998, 0.4002}, {0.4003, 0.5997}};

    double[] pi2 = model.getInitialStateProbabilities();
    for (int i = 0; i < pi.length; i++) {
        assertEquals(expPi2[i], pi2[i], 1E-4);
    }

    Matrix a2 = model.getStateTransitionProbabilities();
    for (int i = 0; i < a.length; i++) {
        for (int j = 0; j < a[i].length; j++) {
            assertEquals(expA2[i][j], a2.get(i, j), 1E-4);
        }
    }

    Matrix b2 = model.getSymbolEmissionProbabilities();
    for (int i = 0; i < b.length; i++) {
        for (int j = 0; j < b[i].length; j++) {
            assertEquals(expB2[i][j], b2.get(i, j), 1E-4);
        }
    }
}
/**
 * Looks up a MEP by its (MD, MA, MepId) key and retrieves its live entry from
 * the owning device via the CfmMepProgrammable behaviour.
 *
 * @return the MEP entry, or {@code null} if no MEP is stored under the key
 * @throws CfmConfigException if the owning device is absent or lacks the
 *         CfmMepProgrammable behaviour
 */
@Override
public MepEntry getMep(MdId mdName, MaIdShort maName, MepId mepId) throws CfmConfigException {
    MepKeyId key = new MepKeyId(mdName, maName, mepId);

    //Will throw IllegalArgumentException if ma does not exist
    cfmMdService.getMaintenanceAssociation(mdName, maName);

    Optional<Mep> mepOptional = mepStore.getMep(key);
    if (mepOptional.isPresent()) {
        Mep mep = mepOptional.get();
        DeviceId mepDeviceId = mep.deviceId();
        // NOTE(review): deviceService.getDevice(mepDeviceId) is called three
        // times below; hoisting it into a local would avoid repeated lookups.
        if (deviceService.getDevice(mepDeviceId) == null) {
            throw new CfmConfigException("Device not found " + mepDeviceId);
        } else if (!deviceService.getDevice(mepDeviceId).is(CfmMepProgrammable.class)) {
            throw new CfmConfigException("Device " + mepDeviceId + " does not support CfmMepProgrammable behaviour.");
        }
        log.debug("Retrieving MEP reults for Mep {} in MD {}, MA {} on Device {}", mep.mepId(), mdName, maName, mepDeviceId);
        return deviceService.getDevice(mepDeviceId)
                .as(CfmMepProgrammable.class).getMep(mdName, maName, mepId);
    } else {
        // No MEP stored under this key.
        return null;
    }
}
// When the MA exists but the device backing the MEP is absent, getMep must
// throw CfmConfigException naming the missing device.
@Test
public void testGetMepMissing() {
    expect(mdService.getMaintenanceAssociation(MDNAME1, MANAME1))
            .andReturn(Optional.ofNullable(ma1))
            .anyTimes();
    replay(mdService);
    // Device lookup returns null to simulate a missing device.
    expect(deviceService.getDevice(DEVICE_ID1)).andReturn(null).anyTimes();
    replay(deviceService);
    expect(driverService.getDriver(DEVICE_ID1)).andReturn(testDriver).anyTimes();
    replay(driverService);
    try {
        mepManager.getMep(MDNAME1, MANAME1, MEPID1);
        fail("Expecting CfmConfigException because device does not exist");
    } catch (CfmConfigException e) {
        assertEquals("Device not found netconf:1.2.3.4:830", e.getMessage());
    }
}
/**
 * Returns the name of this component.
 *
 * @return the stored name
 */
@Override
public String getName() {
    return _name;
}
// The lower() expression must resolve to a scalar wrapper named "lower" and
// lowercase every row of the alphanumeric string column.
@Test
public void testStringLowerTransformFunction() {
    ExpressionContext expression =
            RequestContextUtils.getExpression(String.format("lower(%s)", STRING_ALPHANUM_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "lower");
    // Expected output: lowercase of each source value.
    String[] expectedValues = new String[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = _stringAlphaNumericSVValues[i].toLowerCase();
    }
    testTransformFunction(transformFunction, expectedValues);
}
/**
 * Sets the allocation quantum used when distributing bytes between streams.
 *
 * @param allocationQuantum the quantum; must be positive
 * @throws IllegalArgumentException if the value is not positive
 */
public void allocationQuantum(int allocationQuantum) {
    checkPositive(allocationQuantum, "allocationQuantum");
    this.allocationQuantum = allocationQuantum;
}
// Four equal-weight siblings rooted at the connection: with a quantum of 1,
// the 999 written bytes must be split evenly (333 each) among the three
// streams that have pending data, and the empty stream gets a zero write.
@Test
public void samePriorityShouldDistributeBasedOnData() throws Http2Exception {
    // Root the streams at the connection with the same weights.
    setPriority(STREAM_A, 0, DEFAULT_PRIORITY_WEIGHT, false);
    setPriority(STREAM_B, 0, DEFAULT_PRIORITY_WEIGHT, false);
    setPriority(STREAM_C, 0, DEFAULT_PRIORITY_WEIGHT, false);
    setPriority(STREAM_D, 0, DEFAULT_PRIORITY_WEIGHT, false);
    initState(STREAM_A, 400, true);
    initState(STREAM_B, 500, true);
    initState(STREAM_C, 0, true);
    initState(STREAM_D, 700, true);

    // Set allocation quantum to 1 so it is easier to see the ratio of total bytes written between each stream.
    distributor.allocationQuantum(1);
    assertTrue(write(999));
    assertEquals(333, captureWrites(STREAM_A));
    assertEquals(333, captureWrites(STREAM_B));
    verifyWrite(times(1), STREAM_C, 0);
    assertEquals(333, captureWrites(STREAM_D));
}
/**
 * Converts WGS-84 coordinates to BD-09 (Baidu) coordinates by first
 * transforming through the intermediate GCJ-02 datum.
 *
 * @param lng WGS-84 longitude
 * @param lat WGS-84 latitude
 * @return the BD-09 coordinate
 */
public static Coordinate wgs84ToBd09(double lng, double lat) {
    final Coordinate gcj02 = wgs84ToGcj02(lng, lat);
    return gcj02ToBd09(gcj02.lng, gcj02.lat);
}
// A known WGS-84 point (Beijing area) must convert to the expected BD-09 values exactly.
@Test
public void wgs84toBd09Test() {
    final CoordinateUtil.Coordinate coordinate = CoordinateUtil.wgs84ToBd09(116.404, 39.915);
    assertEquals(116.41662724378733D, coordinate.getLng(), 0);
    assertEquals(39.922699552216216D, coordinate.getLat(), 0);
}
/**
 * Extracts the output type information of a {@link MapFunction}.
 * Convenience overload delegating with no function name and non-lenient mode.
 *
 * @param mapInterface the map function to inspect
 * @param inType       the type information of the function's input
 * @return the inferred output type information
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
        MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
    return getMapReturnTypes(mapInterface, inType, null, false);
}
// A mapper implementing ResultTypeQueryable must have its declared result type
// honored by the extractor instead of reflection-based inference.
@Test
void testResultTypeQueryable() {
    TypeInformation<?> ti =
            TypeExtractor.getMapReturnTypes(
                    new MyQueryableMapper<Integer>(), BasicTypeInfo.STRING_TYPE_INFO);
    assertThat(ti).isEqualTo(BasicTypeInfo.INT_TYPE_INFO);
}
/**
 * Produces the next chunk of results, or {@code null} when the configured
 * limit is reached or the underlying search is exhausted.
 *
 * <p>The first call may consume a pre-fetched initial result; subsequent
 * calls page through via {@code nextSearchResult()}. The returned slice is
 * trimmed so that the running {@code resultCount} never exceeds {@code limit}
 * (a limit of -1 means unlimited).
 *
 * @return the next chunk, or {@code null} when done
 * @throws IOException if fetching the next search result fails
 */
@Override
@Nullable
public ResultChunk nextChunk() throws IOException {

    if (limitReached()) {
        LOG.debug("[{}] Reached limit for query {}", queryHash, getOriginalQuery());
        return null;
    }

    // Consume the pre-fetched initial result exactly once, then page normally.
    final R result = this.initialResult != null ? this.initialResult : nextSearchResult();
    this.lastSearchResponse = result;
    this.initialResult = null;

    final List<ResultMessage> resultMessages = result != null ? collectMessagesFromResult(result) : List.of();

    if (resultMessages.isEmpty()) {
        // chunking exhausted
        LOG.debug("[{}] Reached end of {} results for query {}", queryHash, getChunkingMethodName(), getOriginalQuery());
        return null;
    }

    // Trim the slice so the total returned never exceeds the limit.
    final int remainingResultsForLimit = limit - resultCount;

    final List<ResultMessage> resultMessagesSlice = (limit != -1 && remainingResultsForLimit < resultMessages.size())
            ? resultMessages.subList(0, remainingResultsForLimit)
            : resultMessages;

    resultCount += resultMessagesSlice.size();

    return new ResultChunk(fields, chunkId++, resultMessagesSlice);
}
// With no pre-fetched initial result, the first nextChunk() call must fetch
// from the backing source, be marked as the first chunk, and respect the
// chunk size of 2.
@Test
void getsFirstChunkIfInitialResultIsNull(ResultMessageFactory resultMessageFactory) throws Exception {
    toTest = new ServerlessChunkedQueryResultSimulation(resultMessageFactory,
            "Client",
            null,
            "",
            List.of("name"),
            10,
            2
    );

    final ResultChunk resultChunk = toTest.nextChunk();
    assertThat(resultChunk.isFirstChunk()).isTrue();
    final List<ResultMessage> messages = resultChunk.messages();
    assertThat(messages)
            .isNotNull()
            .hasSize(2);

    verifyElementAt(messages, 0, BACKING_RESULT_LIST.get(0));
    verifyElementAt(messages, 1, BACKING_RESULT_LIST.get(1));
}
public static void validatePermission(@Nullable String tableName, AccessType accessType, @Nullable HttpHeaders httpHeaders, String endpointUrl, AccessControl accessControl) { String userMessage = getUserMessage(tableName, accessType, endpointUrl); String rawTableName = TableNameBuilder.extractRawTableName(tableName); try { if (rawTableName == null) { if (accessControl.hasAccess(accessType, httpHeaders, endpointUrl)) { return; } } else { if (accessControl.hasAccess(rawTableName, accessType, httpHeaders, endpointUrl)) { return; } } } catch (WebApplicationException exception) { // throwing the exception if it's WebApplicationException throw exception; } catch (Throwable t) { // catch and log Throwable for NoSuchMethodError which can happen when there are classpath conflicts // otherwise, grizzly will return a 500 without any logs or indication of what failed throw new ControllerApplicationException(LOGGER, "Caught exception while validating permission for " + userMessage, Response.Status.INTERNAL_SERVER_ERROR, t); } throw new ControllerApplicationException(LOGGER, "Permission is denied for " + userMessage, Response.Status.FORBIDDEN); }
// When the access control denies READ, validatePermission must throw a
// ControllerApplicationException carrying a 403 and a denial message.
@Test
public void testValidatePermissionDenied() {
    AccessControl ac = Mockito.mock(AccessControl.class);
    HttpHeaders mockHttpHeaders = Mockito.mock(HttpHeaders.class);
    Mockito.when(ac.hasAccess(_table, AccessType.READ, mockHttpHeaders, _endpoint)).thenReturn(false);
    try {
        AccessControlUtils.validatePermission(_table, AccessType.READ, mockHttpHeaders, _endpoint, ac);
        Assert.fail("Expected ControllerApplicationException");
    } catch (ControllerApplicationException e) {
        Assert.assertTrue(e.getMessage().contains("Permission is denied"));
        Assert.assertEquals(e.getResponse().getStatus(), Response.Status.FORBIDDEN.getStatusCode());
    }
}
/**
 * Returns the number of entries in the computed diff.
 *
 * @return the diff size
 */
protected int getDiffSize() {
    return diff.size();
}
// Identical current and desired logging configurations must yield an empty diff.
@Test
public void testReplaceRootLogger() {
    KafkaBrokerLoggingConfigurationDiff klcd = new KafkaBrokerLoggingConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()),
            getDesiredConfiguration(emptyList()));
    assertThat(klcd.getDiffSize(), is(0));
}
/**
 * Forwards a command to the dispatcher to be performed against the action
 * registered at {@code index}.
 *
 * @param s     the command to perform
 * @param index the index of the target action in the dispatcher
 */
public void setCommand(Command s, int index) {
    dispatcher.performAction(s, index);
}
// Dispatching a command through the controller must update the model's
// fatigue/health/nourishment to the command's values.
@Test
void testSetCommand() {
    final var model = new GiantModel("giant1", Health.HEALTHY, Fatigue.ALERT,
            Nourishment.SATURATED);
    Action action = new Action(model);
    GiantView giantView = new GiantView();
    Dispatcher dispatcher = new Dispatcher(giantView);
    assertEquals(Nourishment.SATURATED, model.getNourishment());
    dispatcher.addAction(action);
    GiantController controller = new GiantController(dispatcher);
    // Command changes nourishment from SATURATED to HUNGRY; other fields unchanged.
    controller.setCommand(new Command(Fatigue.ALERT, Health.HEALTHY, Nourishment.HUNGRY), 0);
    assertEquals(Fatigue.ALERT, model.getFatigue());
    assertEquals(Health.HEALTHY, model.getHealth());
    assertEquals(Nourishment.HUNGRY, model.getNourishment());
}
/**
 * Updates a template's metadata, source codes, and tags within a single
 * transaction, after verifying that the member owns both the target category
 * and the template.
 *
 * @param memberDto             the acting member
 * @param templateId            the template to update
 * @param updateTemplateRequest the new title/description/category/codes/tags
 */
@Transactional
public void update(MemberDto memberDto, Long templateId, UpdateTemplateRequest updateTemplateRequest) {
    Member member = memberRepository.fetchById(memberDto.id());
    Category category = categoryRepository.fetchById(updateTemplateRequest.categoryId());
    // Ownership checks: the member must own the category and the template.
    validateCategoryAuthorizeMember(category, member);
    Template template = templateRepository.fetchById(templateId);
    validateTemplateAuthorizeMember(template, member);

    template.updateTemplate(updateTemplateRequest.title(), updateTemplateRequest.description(), category);
    updateSourceCodes(updateTemplateRequest, template);
    updateTags(updateTemplateRequest, template);
    // Validate the final code count only after all source-code mutations.
    validateSourceCodesCount(updateTemplateRequest, template);
}
// Full happy-path update: title, thumbnail source code, code count, category,
// and tags must all reflect the update request after the service call.
@Test
@DisplayName("템플릿 수정 성공")
void updateTemplateSuccess() {
    // given
    MemberDto memberDto = MemberDtoFixture.getFirstMemberDto();
    Member member = memberRepository.fetchById(memberDto.id());
    CreateTemplateRequest createdTemplate = makeTemplateRequest("title");
    Template template = saveTemplate(createdTemplate, new Category("category1", member), member);
    categoryRepository.save(new Category("category2", member));

    // when
    UpdateTemplateRequest updateTemplateRequest = makeUpdateTemplateRequest(1L);
    templateService.update(memberDto, template.getId(), updateTemplateRequest);
    Template updateTemplate = templateRepository.fetchById(template.getId());
    List<SourceCode> sourceCodes = sourceCodeRepository.findAllByTemplate(template);
    Thumbnail thumbnail = thumbnailRepository.fetchById(template.getId());
    List<Tag> tags = templateTagRepository.findAllByTemplate(updateTemplate).stream()
            .map(TemplateTag::getTag)
            .toList();

    // then
    assertAll(
            () -> assertThat(updateTemplate.getTitle()).isEqualTo("updateTitle"),
            () -> assertThat(thumbnail.getSourceCode().getId()).isEqualTo(2L),
            () -> assertThat(sourceCodes).hasSize(3),
            () -> assertThat(updateTemplate.getCategory().getId()).isEqualTo(1L),
            () -> assertThat(tags).hasSize(2),
            () -> assertThat(tags.get(1).getName()).isEqualTo("tag3")
    );
}
/**
 * Merges another accumulator into this one by keeping the smaller minimum.
 *
 * @param other the accumulator whose local value is folded in
 */
@Override
public void merge(Accumulator<Long, Long> other) {
    this.min = Math.min(this.min, other.getLocalValue());
}
// Merging in either direction must keep the smaller of the two minima.
@Test
void testMerge() {
    LongMinimum min1 = new LongMinimum();
    min1.add(1234567890987654321L);

    LongMinimum min2 = new LongMinimum();
    min2.add(5678909876543210123L);

    min2.merge(min1);
    assertThat(min2.getLocalValue().longValue()).isEqualTo(1234567890987654321L);

    min1.merge(min2);
    assertThat(min1.getLocalValue().longValue()).isEqualTo(1234567890987654321L);
}
/**
 * Lazily creates (once) and returns the temp directory for this settings
 * instance. The base location comes from the TEMP_DIRECTORY setting, falling
 * back to {@code java.io.tmpdir}. Synchronized so concurrent callers share a
 * single directory.
 *
 * @return the created temp directory
 * @throws IOException if the directory cannot be created
 */
public synchronized File getTempDirectory() throws IOException {
    if (tempDirectory == null) {
        final File baseTemp = new File(getString(Settings.KEYS.TEMP_DIRECTORY, System.getProperty("java.io.tmpdir")));
        tempDirectory = FileUtils.createTempDirectory(baseTemp);
    }
    return tempDirectory;
}
// The lazily created temp directory must exist on disk after the first call.
@Test
public void testGetTempDirectory() throws Exception {
    File tmp = getSettings().getTempDirectory();
    Assert.assertTrue(tmp.exists());
}
/**
 * Merges {@code paramsToMerge} into {@code params} in place.
 *
 * <p>Literal MAP params are merged recursively (with the merge context's
 * parent mode taken from the base param when present); literal STRING_MAP
 * params are merged by shallow key overwrite; everything else is replaced by
 * the merged definition built from the incoming value. Keys absent from
 * {@code paramsToMerge} are left untouched.
 *
 * @param params        the base params, mutated in place
 * @param paramsToMerge the overriding params; {@code null} is a no-op
 * @param context       merge context controlling mode/permission handling
 */
public static void mergeParams(
    Map<String, ParamDefinition> params,
    Map<String, ParamDefinition> paramsToMerge,
    MergeContext context) {
  if (paramsToMerge == null) {
    return;
  }
  // Iterate the union of keys so params present only on either side are handled.
  Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
      .forEach(
          name -> {
            ParamDefinition paramToMerge = paramsToMerge.get(name);
            if (paramToMerge == null) {
              return;
            }
            if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
              // Literal maps merge recursively, inheriting the base param's mode.
              Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
              Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
              mergeParams(
                  baseMap,
                  toMergeMap,
                  MergeContext.copyWithParentMode(
                      context, params.getOrDefault(name, paramToMerge).getMode()));
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else if (paramToMerge.getType() == ParamType.STRING_MAP
                && paramToMerge.isLiteral()) {
              // Literal string maps merge by shallow key overwrite.
              Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
              Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
              baseMap.putAll(toMergeMap);
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else {
              // All other types: the incoming value wins.
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
            }
          });
}
// Merging a param that attempts to change its internal (system) mode must be
// rejected with a MaestroValidationException.
@Test
public void testMergeNotAllowUpdateInternalMode() {
    // Don't allow to modify the source
    AssertHelper.assertThrows(
        "Should not allow modifying internal mode",
        MaestroValidationException.class,
        "Cannot modify system mode for parameter [tomerge]",
        new Runnable() {
          @SneakyThrows
          @Override
          public void run() {
            Map<String, ParamDefinition> allParams =
                parseParamDefMap("{'tomerge': {'type': 'LONG','value': 2}}");
            Map<String, ParamDefinition> paramsToMerge =
                parseParamDefMap(
                    "{'tomerge': {'type': 'LONG', 'value': 3, 'internal_mode': 'OPTIONAL'}}");
            ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
          }
        });
}
/**
 * Returns the sanitized remote URL with any password stripped from the
 * user-info portion. If the URL cannot be parsed as a URI, the sanitized
 * form is returned unchanged.
 *
 * @return the remote URL safe for display/logging
 */
public String defaultRemoteUrl() {
    final String sanitizedUrl = sanitizeUrl();
    try {
        URI uri = new URI(sanitizedUrl);
        if (uri.getUserInfo() != null) {
            // Rebuild the URI with the password removed from user-info.
            uri = new URI(uri.getScheme(), removePassword(uri.getUserInfo()), uri.getHost(), uri.getPort(),
                    uri.getPath(), uri.getQuery(), uri.getFragment());
            return uri.toString();
        }
    } catch (URISyntaxException ignored) {
        // Not a parseable URI: fall through and return the sanitized form as-is.
        // (The original code had an identical return in both the catch and the
        // normal exit path; unified here into a single fall-through return.)
    }
    return sanitizedUrl;
}
// A URL with embedded credentials must come back with the password removed
// (and the sanitized single-# fragment form).
@Test
void shouldReturnAURLWithoutPassword() {
    assertThat(new HgUrlArgument("http://user:pwd@url##branch").defaultRemoteUrl(), is("http://user@url#branch"));
}
/**
 * Validates that {@code value} is a positive power of two.
 *
 * @param value the value to check
 * @throws IllegalArgumentException if the value is not a positive power of two
 */
public static void validatePositivePowerOfTwo(final int value) {
    if (BitUtil.isPowerOfTwo(value)) {
        return;
    }
    throw new IllegalArgumentException("value must be a positive power of two: " + value);
}
// 15 is not a power of two, so validation must throw IllegalArgumentException.
@Test
void validatePositivePowerOfTwoFailWith15() {
    assertThrows(IllegalArgumentException.class, () -> CollectionUtil.validatePositivePowerOfTwo(15));
}
/**
 * Returns the current lifecycle state.
 *
 * @return the current state
 */
State getState() {
    return state;
}
// The lifecycle must allow the full nominal path:
// INIT -> STARTING -> OPERATIONAL -> STOPPING -> FINALIZE_STOPPING -> STOPPED.
@Test
public void verify_regular_start_and_graceful_stop_cycle() {
    assertThat(underTest.getState()).isEqualTo(INIT);
    verifyMoveTo(STARTING);
    verifyMoveTo(OPERATIONAL);
    verifyMoveTo(STOPPING);
    verifyMoveTo(FINALIZE_STOPPING);
    verifyMoveTo(STOPPED);
}
/**
 * Converts a camel-case string to underscore-separated form using {@code '_'}
 * as the separator. Convenience overload delegating to the three-argument form.
 *
 * @param src     the camel-case input
 * @param isUpper whether the result should be upper-cased
 * @return the underscore-separated string
 */
public static String toUnderline(String src, boolean isUpper) {
    return toUnderline(src, '_', isUpper);
}
// Camel-case input must convert to underscore form, respecting the case flag.
@Test
public void testToUnderline() {
    String result = FieldUtils.toUnderline("ToUnderline", true);
    Assert.assertEquals("TO_UNDERLINE", result);

    String result1 = FieldUtils.toUnderline("ToUnderline", false);
    Assert.assertEquals("to_underline", result1);
}
/**
 * Signs {@code str} with the current secret and returns the original string
 * with the signature appended after the {@code SIGNATURE} marker.
 * Synchronized because the secret provider and signing are shared state.
 *
 * @param str the string to sign; must be non-null and non-empty
 * @return {@code str + SIGNATURE + signature}
 * @throws IllegalArgumentException if {@code str} is null or empty
 */
public synchronized String sign(String str) {
    if (str == null || str.isEmpty()) {
        throw new IllegalArgumentException("NULL or empty string to sign");
    }
    final byte[] secret = secretProvider.getCurrentSecret();
    final String signature = computeSignature(secret, str);
    return str + SIGNATURE + signature;
}
// Signing must be deterministic for equal inputs and differ for different inputs.
@Test
public void testSignature() throws Exception {
    Signer signer = new Signer(createStringSignerSecretProvider());
    String s1 = signer.sign("ok");
    String s2 = signer.sign("ok");
    String s3 = signer.sign("wrong");
    Assert.assertEquals(s1, s2);
    Assert.assertNotEquals(s1, s3);
}
/**
 * Visits one GraphQL field and emits the corresponding JSON Schema node into
 * the schema being built.
 *
 * <p>Every visited field is added to the current node's {@code required}
 * array. The handling then dispatches on the field's type: specified scalars
 * map to a JSON scalar type; object types open a nested {@code properties}
 * node (and move the visitor's {@code parentNode}/{@code currentNode}
 * cursors into it); lists emit an {@code items} node (descending into it for
 * non-scalar item types); enums become a string type with an {@code enum}
 * value list.
 *
 * <p>NOTE: this method mutates visitor state ({@code parentNode},
 * {@code currentNode}) for object and list branches, so later visits land
 * inside the nested node.
 */
@Override
public void visitField(QueryVisitorFieldEnvironment queryVisitorFieldEnvironment) {
    // Each new property should put as required and we should not allow additional properties.
    ArrayNode required = getRequiredArrayNode();
    required.add(queryVisitorFieldEnvironment.getFieldDefinition().getName());

    // Even if type is marked as optional in the GraphQL Schema, it must be present and
    // serialized as null into the Json response. We have to unwrap it first.
    GraphQLOutputType outputType = queryVisitorFieldEnvironment.getFieldDefinition().getType();
    Type definitionType = queryVisitorFieldEnvironment.getFieldDefinition().getDefinition().getType();
    if (TypeUtil.isNonNull(definitionType)) {
        definitionType = TypeUtil.unwrapOne(definitionType);
    }

    // Add this field to current node.
    ObjectNode fieldNode = currentNode.putObject(queryVisitorFieldEnvironment.getFieldDefinition().getName());
    TypeInfo definitionTypeInfo = TypeInfo.typeInfo(definitionType);

    // Treat most common case first: we've got a scalar property.
    if (ScalarInfo.isGraphqlSpecifiedScalar(definitionTypeInfo.getName())) {
        fieldNode.put(JSON_SCHEMA_TYPE, getJsonScalarType(definitionTypeInfo.getName()));
    } else if (outputType instanceof GraphQLObjectType) {
        // Then we deal with objects: open a nested properties node and descend into it.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_OBJECT_TYPE);
        ObjectNode properties = fieldNode.putObject(JSON_SCHEMA_PROPERTIES);
        parentNode.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
        fieldNode.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
        parentNode = fieldNode;
        currentNode = properties;
    } else if (TypeUtil.isList(definitionType)) {
        // Then we deal with lists.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_ARRAY_TYPE);
        ObjectNode items = fieldNode.putObject(JSON_SCHEMA_ITEMS);

        // Depending on item type, we should initialize an object structure.
        TypeName itemTypeInfo = TypeUtil.unwrapAll(definitionType);
        if (!ScalarInfo.isGraphqlSpecifiedScalar(itemTypeInfo.getName())) {
            // Non-scalar items: emit an object items node and descend into it.
            items.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_OBJECT_TYPE);
            ObjectNode properties = items.putObject(JSON_SCHEMA_PROPERTIES);
            items.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
            parentNode = items;
            currentNode = properties;
        }
    } else if (outputType instanceof GraphQLEnumType enumType) {
        // Then we deal with enumerations: string type with allowed values listed.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_STRING_TYPE);
        ArrayNode enumNode = fieldNode.putArray(JSON_SCHEMA_ENUM);
        for (GraphQLEnumValueDefinition valDef : enumType.getValues()) {
            enumNode.add(valDef.getName());
        }
    }
}
// A field with GraphQL scalar type "String" must produce a JSON Schema
// property of type "string" under the generated properties node.
@Test
public void testVisitFieldWithScalarType() {
    QueryVisitorFieldEnvironment environment = mock(QueryVisitorFieldEnvironment.class);
    GraphQLOutputType outputType = mock(GraphQLOutputType.class);
    TypeName definitionType = TypeName.newTypeName().name("String").build();

    when(environment.getFieldDefinition()).thenReturn(mock(GraphQLFieldDefinition.class));
    when(environment.getFieldDefinition().getDefinition()).thenReturn(mock(FieldDefinition.class));
    when(environment.getFieldDefinition().getType()).thenReturn(outputType);
    when(environment.getFieldDefinition().getDefinition().getType()).thenReturn(definitionType);
    when(environment.getFieldDefinition().getName()).thenReturn("scalarField");

    visitor.visitField(environment);

    JsonNode fieldNode = jsonSchemaData.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_PROPERTIES).get("scalarField");
    assertEquals("string", fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_TYPE).asText());
}
/**
 * Writes one message into the ring buffer, claiming space with the header
 * prepended.
 *
 * <p>Memory-ordering protocol (do NOT reorder these statements): the record
 * length is first published negative via an ordered put to mark the slot
 * in-progress, a release fence then ensures the claim is visible before the
 * payload bytes, and finally the positive length is published (ordered) to
 * make the record visible to readers.
 *
 * @param msgTypeId the message type id; validated
 * @param srcBuffer source of the message payload
 * @param offset    payload offset within {@code srcBuffer}
 * @param length    payload length; validated against max message length
 * @return {@code true} if written, {@code false} if the buffer lacked capacity
 */
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int offset, final int length) {
    checkTypeId(msgTypeId);
    checkMsgLength(length);

    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);

    if (INSUFFICIENT_CAPACITY == recordIndex) {
        return false;
    }

    // Negative length marks the record as in-progress.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    MemoryAccess.releaseFence();

    buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, offset, length);
    buffer.putInt(typeOffset(recordIndex), msgTypeId);
    // Positive length publishes the completed record to consumers.
    buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);

    return true;
}
// With head/tail a full capacity apart (buffer full), write must return false
// and must not touch the tail counter.
@Test
void shouldRejectWriteWhenBufferFull() {
    final int length = 8;
    final long head = 0L;
    final long tail = head + CAPACITY;

    when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(head);
    when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(tail);

    final UnsafeBuffer srcBuffer = new UnsafeBuffer(allocateDirect(1024));
    final int srcIndex = 0;
    assertFalse(ringBuffer.write(MSG_TYPE_ID, srcBuffer, srcIndex, length));

    verify(buffer, never()).putLongOrdered(anyInt(), anyInt());
}
public boolean matchesBeacon(Beacon beacon) { // All identifiers must match, or the corresponding region identifier must be null. for (int i = mIdentifiers.size(); --i >= 0; ) { final Identifier identifier = mIdentifiers.get(i); Identifier beaconIdentifier = null; if (i < beacon.mIdentifiers.size()) { beaconIdentifier = beacon.getIdentifier(i); } if ((beaconIdentifier == null && identifier != null) || (beaconIdentifier != null && identifier != null && !identifier.equals(beaconIdentifier))) { return false; } } if (mBluetoothAddress != null && !mBluetoothAddress.equalsIgnoreCase(beacon.mBluetoothAddress)) { return false; } return true; }
// A region specifying only the first identifier (others null = wildcard) must
// match a beacon whose first identifier is the same.
@Test
public void testBeaconMatchesRegionWithSameIdentifier1() {
    Beacon beacon = new AltBeacon.Builder().setId1("1").setId2("2").setId3("3").setRssi(4)
            .setBeaconTypeCode(5).setTxPower(6).setBluetoothAddress("1:2:3:4:5:6").build();
    Region region = new Region("myRegion", Identifier.parse("1"), null, null);
    assertTrue("Beacon should match region with first identifier the same", region.matchesBeacon(beacon));
}
/**
 * Kicks off scheduling by collecting all source pipelined regions (regions
 * with no unsatisfied upstream inputs) and scheduling them.
 */
@Override
public void startScheduling() {
    final Set<SchedulingPipelinedRegion> sourceRegions =
            IterableUtils.toStream(schedulingTopology.getAllPipelinedRegions())
                    .filter(this::isSourceRegion)
                    .collect(Collectors.toSet());
    maybeScheduleRegions(sourceRegions);
}
// Builds v1(4) -> v2(3) -> v3(2) with a mix of PIPELINED and HYBRID_FULL
// edges, checks the pipelined-region partitioning (two regions of sizes 5
// and 4), and verifies the scheduler schedules exactly those two regions.
@Test
void testSchedulingTopologyWithHybridCrossRegionConsumedPartitionGroups() throws Exception {
    final JobVertex v1 = createJobVertex("v1", 4);
    final JobVertex v2 = createJobVertex("v2", 3);
    final JobVertex v3 = createJobVertex("v3", 2);

    // Pipelined v1->v2 and v1->v3; hybrid-full v2->v3 crosses region boundaries.
    v2.connectNewDataSetAsInput(
            v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(
            v2, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
    v3.connectNewDataSetAsInput(
            v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
    final JobGraph jobGraph =
            JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
    final ExecutionGraph executionGraph =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setJobGraph(jobGraph)
                    .build(EXECUTOR_RESOURCE.getExecutor());

    final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();

    // Test whether the topology is built correctly
    final List<SchedulingPipelinedRegion> regions = new ArrayList<>();
    schedulingTopology.getAllPipelinedRegions().forEach(regions::add);
    assertThat(regions).hasSize(2);

    final ExecutionVertex v31 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[0];

    final Set<ExecutionVertexID> region1 = new HashSet<>();
    schedulingTopology
            .getPipelinedRegionOfVertex(v31.getID())
            .getVertices()
            .forEach(vertex -> region1.add(vertex.getId()));
    assertThat(region1).hasSize(5);

    final ExecutionVertex v32 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[1];

    final Set<ExecutionVertexID> region2 = new HashSet<>();
    schedulingTopology
            .getPipelinedRegionOfVertex(v32.getID())
            .getVertices()
            .forEach(vertex -> region2.add(vertex.getId()));
    assertThat(region2).hasSize(4);

    startScheduling(schedulingTopology);

    assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(2);

    // Test whether region 1 is scheduled correctly
    final List<ExecutionVertexID> scheduledVertices1 =
            testingSchedulerOperation.getScheduledVertices().get(0);
    assertThat(scheduledVertices1).hasSize(5);
    for (ExecutionVertexID vertexId : scheduledVertices1) {
        assertThat(region1).contains(vertexId);
    }

    // Test whether region 2 is scheduled correctly
    final List<ExecutionVertexID> scheduledVertices2 =
            testingSchedulerOperation.getScheduledVertices().get(1);
    assertThat(scheduledVertices2).hasSize(4);
    for (ExecutionVertexID vertexId : scheduledVertices2) {
        assertThat(region2).contains(vertexId);
    }
}
/**
 * Renders a millisecond duration as a compact human-readable string
 * (e.g. {@code "1m1s100ms"}); non-positive durations render as {@code "0ms"}.
 *
 * @param timeMilli the duration in milliseconds
 * @return the formatted duration string
 */
public static String toTimeAsString(final long timeMilli) {
    if (timeMilli < 1) {
        return "0ms";
    }
    final StringBuilder formatted = new StringBuilder();
    scanTimeUnits(timeMilli, formatted);
    return formatted.toString();
}
// Round-trips and direct millisecond values must format into the compact
// d/h/m/s/ms notation, omitting zero-valued units.
@Test
public void testToTimeAsString() {
    assertEquals("600ms", ConnectorConfigGeneratorUtils.toTimeAsString(TimeUtils.toMilliSeconds("600ms")));
    assertEquals("0ms", ConnectorConfigGeneratorUtils.toTimeAsString(TimeUtils.toMilliSeconds("0ms")));
    assertEquals("1s", ConnectorConfigGeneratorUtils.toTimeAsString(TimeUtils.toMilliSeconds("1000ms")));
    assertEquals("1m600ms", ConnectorConfigGeneratorUtils.toTimeAsString(TimeUtils.toMilliSeconds("1m600ms")));
    assertEquals("1m1s100ms", ConnectorConfigGeneratorUtils.toTimeAsString(TimeUtils.toMilliSeconds("1m1100ms")));
    assertEquals("5m10s300ms", ConnectorConfigGeneratorUtils.toTimeAsString(310300));
    assertEquals("5s500ms", ConnectorConfigGeneratorUtils.toTimeAsString(5500));
    assertEquals("1h50m", ConnectorConfigGeneratorUtils.toTimeAsString(6600000));
    assertEquals("2d3h4m", ConnectorConfigGeneratorUtils.toTimeAsString(Duration.parse("P2DT3H4M").toMillis()));
    assertEquals("2d4m", ConnectorConfigGeneratorUtils.toTimeAsString(Duration.parse("P2DT4M").toMillis()));
}
/**
 * Runs {@code runnable} only when {@code source} is a non-empty string.
 * Any exception thrown by the runnable is caught and logged (best-effort
 * execution; callers are not notified of failure).
 *
 * @param source   the guard string
 * @param runnable the task to run when the guard is non-empty
 */
public static void stringNotEmptyAndThenExecute(String source, Runnable runnable) {
    if (StringUtils.isNotEmpty(source)) {
        try {
            runnable.run();
        } catch (Exception e) {
            // Deliberately swallowed: this helper is best-effort by design.
            LogUtils.NAMING_LOGGER.error("string not empty and then execute cause an exception.", e);
        }
    }
}
// When the task throws, the helper must still have invoked it exactly once
// and must swallow the exception (nothing propagates to the caller).
@Test
void testStringNotEmptyAndThenExecuteException() {
    String word = "run";
    Runnable task = Mockito.mock(Runnable.class);
    doThrow(new RuntimeException("test")).when(task).run();
    TemplateUtils.stringNotEmptyAndThenExecute(word, task);
    Mockito.verify(task, Mockito.times(1)).run();
    // NO exception thrown
}
/**
 * Builds a wrapper method for the given ABI function definition.
 * Convenience overload delegating with the boolean flag set to {@code true}.
 *
 * @param functionDefinition the ABI function to wrap
 * @return the generated method spec
 * @throws ClassNotFoundException if a referenced type cannot be resolved
 */
MethodSpec buildFunction(AbiDefinition functionDefinition) throws ClassNotFoundException {
    return buildFunction(functionDefinition, true);
}
// A payable ABI function must generate a wrapper taking an extra weiValue
// parameter and routing through executeRemoteCallTransaction.
@Test
public void testBuildPayableFunctionTransaction() throws Exception {
    AbiDefinition functionDefinition =
            new AbiDefinition(
                    false,
                    Arrays.asList(new NamedType("param", "uint8")),
                    "functionName",
                    Collections.emptyList(),
                    "type",
                    true);

    MethodSpec methodSpec = solidityFunctionWrapper.buildFunction(functionDefinition);

    // Expected generated source, compared verbatim.
    String expected =
            "public org.web3j.protocol.core.RemoteFunctionCall<org.web3j.protocol.core.methods.response.TransactionReceipt> functionName(\n"
                    + "    java.math.BigInteger param, java.math.BigInteger weiValue) {\n"
                    + "  final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
                    + "      FUNC_FUNCTIONNAME, \n"
                    + "      java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param)), \n"
                    + "      java.util.Collections.<org.web3j.abi.TypeReference<?>>emptyList());\n"
                    + "  return executeRemoteCallTransaction(function, weiValue);\n"
                    + "}\n";

    assertEquals(methodSpec.toString(), (expected));
}
/**
 * Returns the cache manager produced by this factory bean.
 *
 * @return the configured {@link SpringEmbeddedCacheManager}
 * @throws Exception declared by the FactoryBean contract; not thrown here
 */
@Override
public SpringEmbeddedCacheManager getObject() throws Exception {
    return this.cacheManager;
}
// A ConfigurationBuilder supplied to the factory bean must override settings
// from the XML config file (concurrency level 100 wins over the file's value).
@Test
public void testIfSpringEmbeddedCacheManagerFactoryBeanAllowesOverridingConfigurationBuilder() throws Exception {
    ConfigurationBuilder overriddenBuilder = new ConfigurationBuilder();
    overriddenBuilder.locking().concurrencyLevel(100);
    objectUnderTest = SpringEmbeddedCacheManagerFactoryBeanBuilder
            .defaultBuilder().fromFile(NAMED_ASYNC_CACHE_CONFIG_LOCATION, getClass())
            .withConfigurationBuilder(overriddenBuilder).build();

    final SpringEmbeddedCacheManager springEmbeddedCacheManager = objectUnderTest.getObject();

    assertEquals(
            "Concurrency value of LockingLocking for cache configured in" + CACHE_NAME_FROM_CONFIGURATION_FILE
                    + "is equal to 5000. But later Configuration Builder overrides "
                    + "this setting to 100. Obviously created SpringEmbeddedCacheManagerFactoryBean does not support "
                    + "this kind of overriding.",
            100,
            springEmbeddedCacheManager.getNativeCacheManager().getDefaultCacheConfiguration().locking()
                    .concurrencyLevel());
}
/**
 * Rule requiring every test class, identified by the default "Test" suffix,
 * to reside in the same package as the implementation class it tests.
 */
@PublicAPI(usage = ACCESS)
public static ArchRule testClassesShouldResideInTheSamePackageAsImplementation() {
    return testClassesShouldResideInTheSamePackageAsImplementation("Test");
}
// A test class located in the same package as its implementation class
// must not be reported as a violation by the rule.
@Test
public void test_class_in_same_package_should_pass_when_test_class_and_implementation_class_reside_in_the_same_package() {
    assertThatRule(testClassesShouldResideInTheSamePackageAsImplementation())
            .checking(new ClassFileImporter().importPackagesOf(ImplementationClassWithCorrectPackage.class))
            .hasNoViolation();
}
/**
 * FEEL count(list): returns the number of elements in the given list.
 *
 * @param list the list to count (raw type kept to match the FEEL function contract)
 * @return the list size as a BigDecimal, or an error result carrying an
 *         InvalidParametersEvent when {@code list} is null
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "list" ) List list) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    return FEELFnResult.ofResult( BigDecimal.valueOf( list.size() ) );
}
// An explicitly-null varargs array must be rejected as invalid parameters.
@Test
void invokeParamArrayNull() {
    FunctionTestUtil.assertResultError(countFunction.invoke((Object[]) null), InvalidParametersEvent.class);
}
/**
 * Removes the given vertex (and its adjacency entry) from the graph.
 *
 * NOTE(review): this only removes the vertex's own entry from {@code neighbors};
 * if other vertices' neighbor collections can still reference it, those edges
 * are left dangling — confirm whether callers rely on that behavior.
 */
public void removeVertex(V vertex) {
    neighbors.remove(vertex);
}
// After removal the vertex must no longer be reported as part of the graph.
@Test
void removeVertex() {
    graph.removeVertex('B');
    assertFalse(graph.containsVertex('B'));
}
@Override
public HttpResponse get() throws InterruptedException, ExecutionException {
    try {
        // Block until the request completes (timeout 0 means wait indefinitely).
        Object outcome = process(0, null);
        if (outcome instanceof Throwable) {
            // Failures are delivered as the result object; rewrap per the Future contract.
            throw new ExecutionException((Throwable) outcome);
        }
        return (HttpResponse) outcome;
    } finally {
        // Mark completion regardless of success or failure.
        isDone = true;
    }
}
// get() must propagate an InterruptedException raised while waiting for the result.
@Test(expected = InterruptedException.class)
public void errGetTimeout() throws ExecutionException, InterruptedException, TimeoutException {
    get(new InterruptedException(), true);
}
/**
 * Writes the given bytes to the file at {@code path}, overwriting any existing file.
 *
 * @return the file system passed in, for call chaining
 * @throws IOException if the file cannot be created or written
 */
public static FileSystem write(final FileSystem fs, final Path path, final byte[] bytes)
        throws IOException {
    Objects.requireNonNull(path);
    Objects.requireNonNull(bytes);
    // try-with-resources guarantees the stream is flushed and closed even on failure.
    try (FSDataOutputStream stream = fs.createFile(path).overwrite(true).build()) {
        stream.write(bytes);
    }
    return fs;
}
@Test
public void testWriteStringNoCharSetFileContext() throws IOException {
    URI baseUri = tmp.toURI();
    Configuration conf = new Configuration();
    FileContext fileContext = FileContext.getFileContext(baseUri, conf);
    Path testPath = new Path(new Path(baseUri), "writestring.out");

    // Mix ASCII and non-ASCII characters to exercise the charset handling.
    String content = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
    FileUtil.write(fileContext, testPath, content);

    // Read the file back through plain java.io and compare with the original.
    String roundTripped = FileUtils.readFileToString(new File(testPath.toUri()), StandardCharsets.UTF_8);
    assertEquals(content, roundTripped);
}
@Override
protected double maintain() {
    // Snapshot of nodes after provisioning; used below to decide what to mark for removal.
    List<Node> provisionedSnapshot;
    try {
        NodeList nodes;
        // Host and child nodes are written in separate transactions, but both are written while holding the
        // unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
        // of newly provisioned hosts.
        try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
            nodes = nodeRepository().nodes().list();
        }
        provisionedSnapshot = provision(nodes);
    } catch (NodeAllocationException | IllegalStateException e) {
        // Expected failure modes: log the message only, without a stack trace.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
        return 0;  // avoid removing excess hosts
    } catch (RuntimeException e) {
        // Unexpected failures: keep the full stack trace for diagnosis.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts", e);
        return 0;  // avoid removing excess hosts
    }
    return markForRemoval(provisionedSnapshot);
}
// Scenario test: preprovisioned cluster capacity should be allocated onto large
// shared hosts, and hosts should be (de)provisioned as the capacity flag changes.
@Test
public void preprovision_with_shared_host() {
    tester = new DynamicProvisioningTester().addInitialNodes();
    // Makes provisioned hosts 48-128-1000-10
    tester.hostProvisioner.setHostFlavor("host4");
    var clusterCapacity = new ClusterCapacity(2, 1.0, 30.0, 20.0, 3.0, "fast", "remote", "x86_64", null);
    setPreprovisionCapacityFlag(tester, clusterCapacity);

    // Initial state: no provisioned hosts yet, only the initial node set.
    assertEquals(0, tester.hostProvisioner.provisionedHosts().size());
    assertEquals(9, tester.nodeRepository.nodes().list().size());
    assertTrue(node("host2").isPresent());
    assertTrue(node("host2-1").isPresent());
    assertTrue(node("host3").isPresent());
    assertTrue(node("host100").isEmpty());

    // The first cluster will be allocated to host3 and a new host host100.
    // host100 will be a large shared host specified above.
    tester.maintain();
    verifyFirstMaintain(tester);

    // Second maintain should be a no-op, otherwise we did wrong in the first maintain.
    tester.maintain();
    verifyFirstMaintain(tester);

    // Add a second cluster equal to the first. It should fit on existing host3 and host100.
    setPreprovisionCapacityFlag(tester, clusterCapacity, clusterCapacity);
    tester.maintain();
    verifyFirstMaintain(tester);

    // Change second cluster such that it doesn't fit on host3, but does on host100,
    // and with a size of 2 it should allocate a new shared host.
    // The node allocation code prefers to allocate to the shared hosts instead of host3 (at least
    // in this test, due to skew), so host3 will be deprovisioned when host101 is provisioned.
    // host3 is a 24-64-100-10 while host100 is 48-128-1000-10.
    setPreprovisionCapacityFlag(tester, clusterCapacity,
            new ClusterCapacity(2, 24.0, 64.0, 100.0, 1.0, "fast", "remote", "x86_64", null));
    tester.maintain();
    assertEquals(2, tester.hostProvisioner.provisionedHosts().size());
    assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
    assertEquals(8, tester.nodeRepository.nodes().list().not().state(State.deprovisioned).size()); // 3 removed, 2 added
    assertSame("preprovision capacity is prefered on shared hosts", State.deprovisioned, node("host3").get().state());
    assertTrue(node("host100").isPresent());
    assertTrue(node("host101").isPresent());

    // If the preprovision capacity is reduced, we should see shared hosts deprovisioned.
    setPreprovisionCapacityFlag(tester,
            new ClusterCapacity(1, 1.0, 30.0, 20.0, 3.0, "fast", "remote", "x86_64", null));
    tester.maintain();
    assertEquals("one provisioned host has been deprovisioned, so there are 2 -> 1 provisioned hosts",
            1, tester.hostProvisioner.provisionedHosts().size());
    assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
    assertEquals(7, tester.nodeRepository.nodes().list().not().state(State.deprovisioned).size()); // 4 removed, 2 added
    // Either host100 or host101 survives; the other must be deprovisioned.
    if (node("host100").isPresent()) {
        assertSame("host101 is superfluous and should have been deprovisioned",
                State.deprovisioned, node("host101").get().state());
    } else {
        assertTrue("host101 is required for preprovision capacity", node("host101").isPresent());
    }

    // If a host with another architecture is added to preprovision capacity, a shared host should be added.
    setPreprovisionCapacityFlag(tester,
            new ClusterCapacity(1, 2.0, 30.0, 20.0, 3.0, "fast", "remote", "x86_64", null),
            new ClusterCapacity(1, 2.0, 30.0, 20.0, 3.0, "fast", "remote", "arm64", null));
    tester.hostProvisioner.setHostFlavor("arm64");
    tester.maintain();
    assertEquals(2, tester.hostProvisioner.provisionedHosts().size());
    assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
    assertEquals(1, tester.provisionedHostsMatching(new NodeResources(2, 30, 20, 3, fast, remote, arm64)));
}
/**
 * Whether the given member supports long (persistent) connections: either the
 * explicit gRPC-report flag is enabled, or the legacy remote-connection ability
 * is advertised.
 */
public static boolean isSupportedLongCon(Member member) {
    // Members that have not reported abilities cannot support long connections.
    if (member.getAbilities() == null || member.getAbilities().getRemoteAbility() == null) {
        return false;
    }
    if (member.isGrpcReportEnabled()) {
        return true;
    }
    // Fall back to the older version's remote-connection ability flag.
    return member.getAbilities().getRemoteAbility().isSupportRemoteConnection();
}
// Walks the member through each ability state: flag off, flag on,
// remote ability null, and abilities null (order matters — state is mutated).
@Test
void testIsSupportedLongCon() {
    assertFalse(MemberUtil.isSupportedLongCon(originalMember));
    originalMember.getAbilities().getRemoteAbility().setSupportRemoteConnection(true);
    assertTrue(MemberUtil.isSupportedLongCon(originalMember));
    originalMember.getAbilities().setRemoteAbility(null);
    assertFalse(MemberUtil.isSupportedLongCon(originalMember));
    originalMember.setAbilities(null);
    assertFalse(MemberUtil.isSupportedLongCon(originalMember));
}
@NonNull static String getImageUrl(List<FastDocumentFile> files, Uri folderUri) { // look for special file names for (String iconLocation : PREFERRED_FEED_IMAGE_FILENAMES) { for (FastDocumentFile file : files) { if (iconLocation.equals(file.getName())) { return file.getUri().toString(); } } } // use the first image in the folder if existing for (FastDocumentFile file : files) { String mime = file.getType(); if (mime != null && (mime.startsWith("image/jpeg") || mime.startsWith("image/png"))) { return file.getUri().toString(); } } // use default icon as fallback return Feed.PREFIX_GENERATIVE_COVER + folderUri; }
// A JPEG whose name is not one of the preferred file names must still be
// picked as cover image when no preferred file exists.
@Test
public void testGetImageUrl_OtherImageFilenameJpeg() {
    List<FastDocumentFile> folder = Arrays.asList(mockDocumentFile("audio.mp3", "audio/mp3"),
            mockDocumentFile("my-image.jpeg", "image/jpeg"));
    String imageUrl = LocalFeedUpdater.getImageUrl(folder, Uri.EMPTY);
    assertThat(imageUrl, endsWith("my-image.jpeg"));
}
/**
 * Initializes the JVM metrics singleton with the given process name and
 * session id, delegating to {@code Singleton.INSTANCE.init}.
 */
public static JvmMetrics initSingleton(String processName, String sessionId) {
    return Singleton.INSTANCE.init(processName, sessionId);
}
@Test
public void testJvmMetricsSingletonWithSameProcessName() {
    // Two init calls with identical arguments must hand back the same instance.
    JvmMetrics first = org.apache.hadoop.metrics2.source.JvmMetrics.initSingleton("test", null);
    JvmMetrics second = org.apache.hadoop.metrics2.source.JvmMetrics.initSingleton("test", null);
    Assert.assertEquals("initSingleton should return the singleton instance", first, second);
}
public CMap parsePredefined(String name) throws IOException { try (RandomAccessRead randomAccessRead = getExternalCMap(name)) { // deactivate strict mode strictMode = false; return parse(randomAccessRead); } }
// Identity-H maps every two-byte code directly to the numerically identical CID.
@Test
void testIdentity() throws IOException {
    CMap cMap = new CMapParser().parsePredefined("Identity-H");
    assertEquals(65, cMap.toCID(new byte[] { 0, 65 }), "Indentity-H CID 65");
    assertEquals(12345, cMap.toCID(new byte[] { 0x30, 0x39 }), "Indentity-H CID 12345");
    assertEquals(0xFFFF, cMap.toCID(new byte[] { (byte) 0xFF, (byte) 0xFF }), "Indentity-H CID 0xFFFF");
}
/**
 * Returns the stream message at the given position in this batch.
 */
@Override
public BytesStreamMessage getStreamMessage(int index) {
    return _messages.get(index);
}
// With key/value stitching enabled the payload layout is [keyLen][key][valueLen][value].
@Test
public void testMessageBatchWithStitching() {
    PulsarConfig config = mock(PulsarConfig.class);
    when(config.getEnableKeyValueStitch()).thenReturn(true);
    List<BytesStreamMessage> streamMessages = List.of(PulsarUtils.buildPulsarStreamMessage(_message, config));
    PulsarMessageBatch messageBatch =
        new PulsarMessageBatch(streamMessages, mock(MessageIdStreamOffset.class), false);
    BytesStreamMessage streamMessage = messageBatch.getStreamMessage(0);
    byte[] keyValueBytes = streamMessage.getValue();
    assertNotNull(keyValueBytes);
    // Two 4-byte length prefixes plus the raw key and value bytes.
    assertEquals(keyValueBytes.length, 8 + _expectedKeyBytes.length + _expectedValueBytes.length);
    ByteBuffer byteBuffer = ByteBuffer.wrap(keyValueBytes);
    int keyLength = byteBuffer.getInt();
    byte[] keyBytes = new byte[keyLength];
    byteBuffer.get(keyBytes);
    assertEquals(keyBytes, _expectedKeyBytes);
    int valueLength = byteBuffer.getInt();
    byte[] valueBytes = new byte[valueLength];
    byteBuffer.get(valueBytes);
    assertEquals(valueBytes, _expectedValueBytes);
}
/**
 * Builds a child component's uuid path from its parent:
 * parent path + parent uuid + separator (e.g. "." + "123" + "." -> ".123.").
 */
public static String formatUuidPathFromParent(ComponentDto parent) {
    String parentPath = parent.getUuidPath();
    String parentUuid = parent.uuid();
    checkArgument(!Strings.isNullOrEmpty(parentPath));
    checkArgument(!Strings.isNullOrEmpty(parentUuid));
    return parentPath + parentUuid + UUID_PATH_SEPARATOR;
}
// Child path must be parent path + parent uuid + "." (root path is ".").
@Test
void formatUuidPathFromParent() {
    ComponentDto parent = ComponentTesting.newPrivateProjectDto("123").setUuidPath(ComponentDto.UUID_PATH_OF_ROOT);
    assertThat(ComponentDto.formatUuidPathFromParent(parent)).isEqualTo(".123.");
}
/**
 * Expands a composite data type into a {@link ResolvedSchema}.
 * Supports modern FieldsDataType and legacy structured types.
 *
 * @throws IllegalArgumentException when the given type is not composite
 */
public static ResolvedSchema expandCompositeTypeToSchema(DataType dataType) {
    if (dataType instanceof FieldsDataType) {
        return expandCompositeType((FieldsDataType) dataType);
    }
    // Legacy path: a structured type wrapped in the pre-FLIP-37 type information.
    boolean isLegacyStructured =
            dataType.getLogicalType() instanceof LegacyTypeInformationType
                    && dataType.getLogicalType().getTypeRoot() == STRUCTURED_TYPE;
    if (!isLegacyStructured) {
        throw new IllegalArgumentException("Expected a composite type");
    }
    return expandLegacyCompositeType(dataType);
}
// Expanding a structured FieldsDataType must keep each field's explicit bridging
// conversion class, and default the rest (TIMESTAMP(3) -> LocalDateTime).
@Test
void testExpandStructuredType() {
    StructuredType logicalType =
            StructuredType.newBuilder(ObjectIdentifier.of("catalog", "database", "type"))
                    .attributes(
                            Arrays.asList(
                                    new StructuredType.StructuredAttribute(
                                            "f0", DataTypes.INT().getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f1", DataTypes.STRING().getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f2", DataTypes.TIMESTAMP(5).getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f3", DataTypes.TIMESTAMP(3).getLogicalType())))
                    .build();

    List<DataType> dataTypes =
            Arrays.asList(
                    DataTypes.INT(),
                    DataTypes.STRING(),
                    // f2 carries an explicit java.sql.Timestamp bridging class.
                    DataTypes.TIMESTAMP(5).bridgedTo(Timestamp.class),
                    DataTypes.TIMESTAMP(3));
    FieldsDataType dataType = new FieldsDataType(logicalType, dataTypes);

    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(dataType);

    assertThat(schema)
            .isEqualTo(
                    ResolvedSchema.of(
                            Column.physical("f0", INT()),
                            Column.physical("f1", STRING()),
                            Column.physical("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
                            Column.physical(
                                    "f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))));
}
/** Returns the shared JsonXStream singleton instance. */
public static JsonXStream getInstance() {
    return s_instance;
}
@Test
public void testEncodingDecodingWithMetaData() throws Exception {
    // Round-trip applications (with per-instance metadata) through the JSON XStream codec.
    Applications original = InstanceInfoGenerator.newBuilder(10, 2).withMetaData(true).build().toApplications();

    XStream codec = JsonXStream.getInstance();
    String json = codec.toXML(original);
    Applications decoded = (Applications) codec.fromXML(json);

    assertThat(EurekaEntityComparators.equal(decoded, original), is(true));
}
/**
 * Deletes the task metadata for the given task type and table from both the new
 * and the deprecated ZK property-store layouts.
 *
 * @throws ZkException if removal of either path fails
 */
public static void deleteTaskMetadata(HelixPropertyStore<ZNRecord> propertyStore, String taskType,
    String tableNameWithType) {
  // Metadata may live under either layout; always try to remove both.
  String newPath = ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadata(tableNameWithType, taskType);
  String deprecatedPath =
      ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadataDeprecated(taskType, tableNameWithType);
  boolean deletedNew = propertyStore.remove(newPath, AccessOption.PERSISTENT);
  boolean deletedDeprecated = propertyStore.remove(deprecatedPath, AccessOption.PERSISTENT);
  if (!deletedNew || !deletedDeprecated) {
    throw new ZkException("Failed to delete task metadata: " + taskType + ", " + tableNameWithType);
  }
}
// deleteTaskMetadata(store, taskType, table) must remove both the new and the
// deprecated paths; the table-scoped overload must leave other tables untouched.
@Test
public void testDeleteTaskMetadata() {
    // no error
    HelixPropertyStore<ZNRecord> propertyStore = new FakePropertyStore();
    MinionTaskMetadataUtils.deleteTaskMetadata(propertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE);

    // both metadata paths will be removed
    propertyStore = new FakePropertyStore();
    propertyStore.set(OLD_MINION_METADATA_PATH, OLD_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
    propertyStore.set(NEW_MINION_METADATA_PATH, NEW_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
    assertTrue(propertyStore.exists(OLD_MINION_METADATA_PATH, ACCESS_OPTION));
    assertTrue(propertyStore.exists(NEW_MINION_METADATA_PATH, ACCESS_OPTION));
    MinionTaskMetadataUtils.deleteTaskMetadata(propertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE);
    assertFalse(propertyStore.exists(OLD_MINION_METADATA_PATH, ACCESS_OPTION));
    assertFalse(propertyStore.exists(NEW_MINION_METADATA_PATH, ACCESS_OPTION));

    // 1. ZNode MINION_TASK_METADATA/TestTable_OFFLINE and its descendants will be removed
    // 2. ZNode MINION_TASK_METADATA/<any task type>/TestTable_OFFLINE will also be removed
    String anotherTable = "anotherTable_OFFLINE";
    String anotherOldMinionMetadataPath =
        ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadataDeprecated(TASK_TYPE, anotherTable);
    DummyTaskMetadata anotherOldTaskMetadata = new DummyTaskMetadata(anotherTable, 20);
    String anotherNewMinionMetadataPath =
        ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadata(anotherTable, TASK_TYPE);
    DummyTaskMetadata anotherNewTaskMetadata = new DummyTaskMetadata(anotherTable, 200);
    propertyStore = new FakePropertyStore();
    propertyStore.set(OLD_MINION_METADATA_PATH, OLD_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
    propertyStore.set(NEW_MINION_METADATA_PATH, NEW_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
    propertyStore.set(anotherOldMinionMetadataPath, anotherOldTaskMetadata.toZNRecord(), EXPECTED_VERSION,
        ACCESS_OPTION);
    propertyStore.set(anotherNewMinionMetadataPath, anotherNewTaskMetadata.toZNRecord(), EXPECTED_VERSION,
        ACCESS_OPTION);
    assertTrue(propertyStore.exists(OLD_MINION_METADATA_PATH, ACCESS_OPTION));
    assertTrue(propertyStore.exists(NEW_MINION_METADATA_PATH, ACCESS_OPTION));
    assertTrue(propertyStore.exists(anotherOldMinionMetadataPath, ACCESS_OPTION));
    assertTrue(propertyStore.exists(anotherNewMinionMetadataPath, ACCESS_OPTION));
    // Table-scoped delete: only TABLE_NAME_WITH_TYPE's metadata goes away.
    MinionTaskMetadataUtils.deleteTaskMetadata(propertyStore, TABLE_NAME_WITH_TYPE);
    assertFalse(propertyStore.exists(OLD_MINION_METADATA_PATH, ACCESS_OPTION));
    assertFalse(propertyStore.exists(NEW_MINION_METADATA_PATH, ACCESS_OPTION));
    assertTrue(propertyStore.exists(anotherOldMinionMetadataPath, ACCESS_OPTION));
    assertTrue(propertyStore.exists(anotherNewMinionMetadataPath, ACCESS_OPTION));
}
/**
 * Maps a broker error code to the next telemetry push interval.
 *
 * @param errorCode broker response error code
 * @param intervalMs previously negotiated push interval, or -1 when unknown
 * @return empty for NONE; otherwise the interval to use (Integer.MAX_VALUE
 *         effectively disables telemetry)
 */
public static Optional<Integer> maybeFetchErrorIntervalMs(short errorCode, int intervalMs) {
    if (errorCode == Errors.NONE.code()) {
        return Optional.empty();
    }

    Errors error = Errors.forCode(errorCode);
    final int pushIntervalMs;
    final String reason;
    switch (error) {
        case INVALID_REQUEST:
        case INVALID_RECORD:
        case UNSUPPORTED_VERSION:
            // Re-sending the same request cannot succeed; effectively disable telemetry.
            pushIntervalMs = Integer.MAX_VALUE;
            reason = "The broker response indicates the client sent an request that cannot be resolved"
                + " by re-trying, hence disable telemetry";
            break;
        case UNKNOWN_SUBSCRIPTION_ID:
        case UNSUPPORTED_COMPRESSION_TYPE:
            // Retry immediately so the client can refresh its subscription/settings.
            pushIntervalMs = 0;
            reason = error.message();
            break;
        case TELEMETRY_TOO_LARGE:
        case THROTTLING_QUOTA_EXCEEDED:
            // Keep the currently negotiated interval when known, else use the default.
            pushIntervalMs = (intervalMs != -1) ? intervalMs : ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS;
            reason = error.message();
            break;
        default:
            log.error("Error code: {}. Unmapped error for telemetry, disable telemetry.", errorCode);
            pushIntervalMs = Integer.MAX_VALUE;
            reason = "Unwrapped error code";
    }
    log.debug("Error code: {}, reason: {}. Push interval update to {} ms.", errorCode, reason, pushIntervalMs);
    return Optional.of(pushIntervalMs);
}
// Covers every error-code class: NONE (empty), non-retriable (MAX_VALUE),
// immediate retry (0), retriable with/without a known interval, and unmapped errors.
@Test
public void testMaybeFetchErrorIntervalMs() {
    assertEquals(Optional.empty(), ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.NONE.code(), -1));
    assertEquals(Optional.of(Integer.MAX_VALUE),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.INVALID_REQUEST.code(), -1));
    assertEquals(Optional.of(Integer.MAX_VALUE),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.INVALID_RECORD.code(), -1));
    assertEquals(Optional.of(0),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.UNKNOWN_SUBSCRIPTION_ID.code(), -1));
    assertEquals(Optional.of(0),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.UNSUPPORTED_COMPRESSION_TYPE.code(), -1));
    assertEquals(Optional.of(ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.TELEMETRY_TOO_LARGE.code(), -1));
    assertEquals(Optional.of(20000),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.TELEMETRY_TOO_LARGE.code(), 20000));
    assertEquals(Optional.of(ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.THROTTLING_QUOTA_EXCEEDED.code(), -1));
    assertEquals(Optional.of(20000),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.THROTTLING_QUOTA_EXCEEDED.code(), 20000));
    assertEquals(Optional.of(Integer.MAX_VALUE),
        ClientTelemetryUtils.maybeFetchErrorIntervalMs(Errors.UNKNOWN_SERVER_ERROR.code(), -1));
}
/**
 * REST endpoint returning the applications matching the given query parameters.
 * When {@code enableAppsCache} is on, results are served from an LRU cache keyed
 * by the full parameter set.
 */
@GET
@Path(RMWSConsts.APPS)
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public AppsInfo getApps(@Context HttpServletRequest hsr,
    @QueryParam(RMWSConsts.STATE) String stateQuery,
    @QueryParam(RMWSConsts.STATES) Set<String> statesQuery,
    @QueryParam(RMWSConsts.FINAL_STATUS) String finalStatusQuery,
    @QueryParam(RMWSConsts.USER) String userQuery,
    @QueryParam(RMWSConsts.QUEUE) String queueQuery,
    @QueryParam(RMWSConsts.LIMIT) String limit,
    @QueryParam(RMWSConsts.STARTED_TIME_BEGIN) String startedBegin,
    @QueryParam(RMWSConsts.STARTED_TIME_END) String startedEnd,
    @QueryParam(RMWSConsts.FINISHED_TIME_BEGIN) String finishBegin,
    @QueryParam(RMWSConsts.FINISHED_TIME_END) String finishEnd,
    @QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> applicationTypes,
    @QueryParam(RMWSConsts.APPLICATION_TAGS) Set<String> applicationTags,
    @QueryParam(RMWSConsts.NAME) String name,
    @QueryParam(RMWSConsts.DESELECTS) Set<String> unselectedFields) {
  // The cache key covers every query parameter so distinct queries never collide.
  AppsCacheKey cacheKey = AppsCacheKey.newInstance(stateQuery, new HashSet<>(statesQuery),
      finalStatusQuery, userQuery, queueQuery, limit, startedBegin, startedEnd,
      finishBegin, finishEnd, new HashSet<>(applicationTypes), new HashSet<>(applicationTags),
      name, unselectedFields);
  if (this.enableAppsCache) {
    long successTimes = getAppsSuccessTimes.incrementAndGet();
    // Log hit-rate statistics once every 1000 successful calls.
    if (successTimes % 1000 == 0) {
      LOG.debug("hit cache info: getAppsSuccessTimes={}, hitAppsCacheTimes={}",
          successTimes, hitAppsCacheTimes.get());
    }
    AppsInfo appsInfo = appsLRUCache.get(cacheKey);
    if (appsInfo != null) {
      hitAppsCacheTimes.getAndIncrement();
      return appsInfo;
    }
  }
  initForReadableEndpoints();
  GetApplicationsRequest request = ApplicationsRequestBuilder.create()
      .withStateQuery(stateQuery)
      .withStatesQuery(statesQuery)
      .withUserQuery(userQuery)
      .withQueueQuery(rm, queueQuery)
      .withLimit(limit)
      .withStartedTimeBegin(startedBegin)
      .withStartedTimeEnd(startedEnd)
      .withFinishTimeBegin(finishBegin)
      .withFinishTimeEnd(finishEnd)
      .withApplicationTypes(applicationTypes)
      .withApplicationTags(applicationTags)
      .withName(name)
      .build();
  List<ApplicationReport> appReports;
  try {
    appReports = rm.getClientRMService().getApplications(request)
        .getApplicationList();
  } catch (YarnException e) {
    LOG.error("Unable to retrieve apps from ClientRMService", e);
    throw new YarnRuntimeException(
        "Unable to retrieve apps from ClientRMService", e);
  }
  final ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext().getRMApps();
  AppsInfo allApps = new AppsInfo();
  for (ApplicationReport report : appReports) {
    RMApp rmapp = apps.get(report.getApplicationId());
    if (rmapp == null) {
      // The app may have been removed between the RPC and this lookup; skip it.
      continue;
    }
    if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) {
      // valueOf doubles as validation: an unknown status raises IllegalArgumentException.
      FinalApplicationStatus.valueOf(finalStatusQuery);
      if (!rmapp.getFinalApplicationStatus().toString()
          .equalsIgnoreCase(finalStatusQuery)) {
        continue;
      }
    }
    DeSelectFields deSelectFields = new DeSelectFields();
    deSelectFields.initFields(unselectedFields);
    boolean allowAccess = hasAccess(rmapp, hsr);
    // Given RM is configured to display apps per user, skip apps to which
    // this caller doesn't have access to view.
    if (filterAppsByUser && !allowAccess) {
      continue;
    }
    AppInfo app = new AppInfo(rm, rmapp, allowAccess,
        WebAppUtils.getHttpSchemePrefix(conf), deSelectFields);
    allApps.add(app);
  }
  if (filterInvalidXMLChars) {
    // Only XML responses need invalid-character escaping in the diagnostics note.
    final String format = hsr.getHeader(HttpHeaders.ACCEPT);
    if (format != null && format.toLowerCase().contains(MediaType.APPLICATION_XML)) {
      for (AppInfo appInfo : allApps.getApps()) {
        appInfo.setNote(escapeInvalidXMLCharacters(appInfo.getNote()));
      }
    }
  }
  if (enableAppsCache) {
    appsLRUCache.put(cacheKey, allApps);
    getAppsSuccessTimes.getAndIncrement();
  }
  return allApps;
}
// Regression test: an app reported by ClientRMService but missing from the RMApps
// map (a race during app completion) must be skipped silently, not cause an NPE.
@Test
public void testAppsRace() throws Exception {
    // mock up an RM that returns app reports for apps that don't exist
    // in the RMApps list
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    ApplicationReport mockReport = mock(ApplicationReport.class);
    when(mockReport.getApplicationId()).thenReturn(appId);
    GetApplicationsResponse mockAppsResponse = mock(GetApplicationsResponse.class);
    when(mockAppsResponse.getApplicationList())
        .thenReturn(Arrays.asList(new ApplicationReport[] { mockReport }));
    ClientRMService mockClientSvc = mock(ClientRMService.class);
    when(mockClientSvc.getApplications(isA(GetApplicationsRequest.class)))
        .thenReturn(mockAppsResponse);
    ResourceManager mockRM = mock(ResourceManager.class);
    RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, null, null, null, null);
    when(mockRM.getRMContext()).thenReturn(rmContext);
    when(mockRM.getClientRMService()).thenReturn(mockClientSvc);
    rmContext.setNodeLabelManager(mock(RMNodeLabelsManager.class));
    RMWebServices webSvc = new RMWebServices(mockRM, new Configuration(), mock(HttpServletResponse.class));
    final Set<String> emptySet = Collections.unmodifiableSet(Collections.<String>emptySet());
    // verify we don't get any apps when querying
    HttpServletRequest mockHsr = mock(HttpServletRequest.class);
    AppsInfo appsInfo = webSvc.getApps(mockHsr, null, emptySet, null, null, null, null, null, null, null, null,
        emptySet, emptySet, null, null);
    assertTrue(appsInfo.getApps().isEmpty());
    // verify we don't get an NPE when specifying a final status query
    appsInfo = webSvc.getApps(mockHsr, null, emptySet, "FAILED", null, null, null, null, null, null, null,
        emptySet, emptySet, null, null);
    assertTrue(appsInfo.getApps().isEmpty());
}
/**
 * Parses a single-header B3 value over its entire length, delegating to the
 * range-based overload. May return null (see {@code @Nullable}).
 */
@Nullable
public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) {
    return parseB3SingleFormat(b3, 0, b3.length());
}
// A trailing "-d" sampling field must set the debug flag on the parsed context.
@Test
void parseB3SingleFormat_spanIdsWithDebug() {
    assertThat(parseB3SingleFormat(traceId + "-" + spanId + "-d").context())
        .isEqualToComparingFieldByField(TraceContext.newBuilder()
            .traceId(Long.parseUnsignedLong(traceId, 16))
            .spanId(Long.parseUnsignedLong(spanId, 16))
            .debug(true).build()
        );
}
/** Returns a copy of the given resource, created via {@code Resource.newInstance}. */
public static Resource clone(Resource res) {
    return Resource.newInstance(res);
}
// Clones must compare equal to the source, including when the third resource
// dimension defaults to zero on either side of the comparison.
@Test
void testClone() {
    assertEquals(createResource(1, 1), Resources.clone(createResource(1, 1)));
    assertEquals(createResource(1, 1, 0), Resources.clone(createResource(1, 1)));
    assertEquals(createResource(1, 1), Resources.clone(createResource(1, 1, 0)));
    assertEquals(createResource(1, 1, 2), Resources.clone(createResource(1, 1, 2)));
}
@Override public boolean alterOffsets(Map<String, String> config, Map<Map<String, ?>, Map<String, ?>> offsets) { for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) { Map<String, ?> sourceOffset = offsetEntry.getValue(); if (sourceOffset == null) { // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't // want to prevent users from being able to clean it up using the REST API continue; } Map<String, ?> sourcePartition = offsetEntry.getKey(); if (sourcePartition == null) { throw new ConnectException("Source partitions may not be null"); } MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_ALIAS_KEY); MirrorUtils.validateSourcePartitionString(sourcePartition, TARGET_CLUSTER_ALIAS_KEY); MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true); } // We don't actually use these offsets in the task class, so no additional effort is required beyond just validating // the format of the user-supplied offsets return true; }
@Test
public void testAlterOffsetsTombstones() {
    MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector();
    // Helper: attempt to tombstone (null out) the offsets for a single partition.
    Function<Map<String, ?>, Boolean> requestTombstone = partitionMap -> connector.alterOffsets(
            null,
            Collections.singletonMap(partitionMap, null)
    );

    Map<String, Object> partition = sourcePartition("src", "bak");
    assertTrue(() -> requestTombstone.apply(partition));

    // Even malformed partitions may be tombstoned so users can clean up garbage offsets.
    partition.put(SOURCE_CLUSTER_ALIAS_KEY, 618);
    assertTrue(() -> requestTombstone.apply(partition));
    partition.remove(SOURCE_CLUSTER_ALIAS_KEY);
    assertTrue(() -> requestTombstone.apply(partition));

    assertTrue(() -> requestTombstone.apply(null));
    assertTrue(() -> requestTombstone.apply(Collections.emptyMap()));
    assertTrue(() -> requestTombstone.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value")));
}
/**
 * Adds a JSR-303 {@code @Size} annotation to the field when "minItems" and/or
 * "maxItems" is present in the schema node and the field type supports it.
 */
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
    // Guard clauses mirror the original conjunction (same short-circuit order).
    boolean hasBound = node.has("minItems") || node.has("maxItems");
    if (!ruleFactory.getGenerationConfig().isIncludeJsr303Annotations() || !hasBound || !isApplicableType(field)) {
        return field;
    }
    // Pick the Jakarta or javax variant of @Size according to configuration.
    final Class<? extends Annotation> sizeClass = ruleFactory.getGenerationConfig().isUseJakartaValidation()
            ? Size.class
            : javax.validation.constraints.Size.class;
    JAnnotationUse annotation = field.annotate(sizeClass);
    if (node.has("minItems")) {
        annotation.param("min", node.get("minItems").asInt());
    }
    if (node.has("maxItems")) {
        annotation.param("max", node.get("maxItems").asInt());
    }
    return field;
}
// "maxItems" alone must produce @Size(max = ...) with no "min" parameter —
// but only for applicable field types (parameterized via isApplicable).
@Test
public void testMaxLength() {
    when(config.isIncludeJsr303Annotations()).thenReturn(true);
    final int maxValue = new Random().nextInt();
    when(subNode.asInt()).thenReturn(maxValue);
    when(node.get("maxItems")).thenReturn(subNode);
    when(fieldVar.annotate(sizeClass)).thenReturn(annotation);
    when(node.has("maxItems")).thenReturn(true);
    when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName());

    JFieldVar result = rule.apply("node", node, null, fieldVar, null);

    assertSame(fieldVar, result);
    verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(sizeClass);
    verify(annotation, times(isApplicable ? 1 : 0)).param("max", maxValue);
    verify(annotation, never()).param(eq("min"), anyInt());
}
@Override public List<Pair<HoodieRecord, Schema>> fullOuterMerge(HoodieRecord older, Schema oldSchema, HoodieRecord newer, Schema newSchema, TypedProperties props) throws IOException { // If the new record is not a delete record, then combine the two records. if (newer.isDelete(newSchema, props)) { return Collections.singletonList(Pair.of(newer, newSchema)); } checkArgument(older.getRecordKey().equals(newer.getRecordKey()), "Record key must be the same for both records"); checkArgument(oldSchema.equals(newSchema), "Schema must be the same for both records"); return Arrays.asList(Pair.of(older, oldSchema), Pair.of(newer, newSchema)); }
// A non-delete update must merge to [older, newer], oldest first.
@Test
public void testFullOuterMerge() throws IOException {
    List<HoodieRecord> newRecordList = dataGen.generateInserts("000", 1);
    List<HoodieRecord> updateRecordList = dataGen.generateUpdates("0001", newRecordList);
    HoodieMetadataRecordMerger recordMerger = new HoodieMetadataRecordMerger();
    List<Pair<HoodieRecord, Schema>> mergedRecords = recordMerger.fullOuterMerge(newRecordList.get(0), AVRO_SCHEMA,
        updateRecordList.get(0), AVRO_SCHEMA, new TypedProperties());
    assertEquals(2, mergedRecords.size());
    // The newer (update) record is the second element.
    assertEquals(updateRecordList.get(0), mergedRecords.get(1).getLeft());
}
/**
 * Resolves metadata, first forcing an internal reload when {@code force} is
 * true, then delegating to {@link #load()} for the result.
 */
@Override
public final MetadataResolver resolve(final boolean force) {
    if (force) {
        internalLoad();
    }
    return load();
}
@Test
public void resolveMetadataFromByteArray() throws Exception {
    // Load the IdP metadata into memory and feed it to the resolver as raw bytes.
    var config = new SAML2Configuration();
    config.setIdentityProviderMetadataResource(
        new ByteArrayResource(new ClassPathResource("idp-metadata.xml").getInputStream().readAllBytes()));
    metadataResolver = new SAML2IdentityProviderMetadataResolver(config);

    var resolved = metadataResolver.resolve();
    assertNotNull(resolved);
    // No change is expected right after resolution, and a subsequent load still succeeds.
    assertFalse(metadataResolver.hasChanged());
    assertNotNull(metadataResolver.load());
}
/**
 * Validates that the HDFS proxy-user (impersonation) configuration covers the
 * current Alluxio user. Skipped entirely under NOSASL authentication.
 */
@Override
public ValidationTaskResult validateImpl(Map<String, String> optionMap) {
    // Skip this test if NOSASL
    if (mConf.get(PropertyKey.SECURITY_AUTHENTICATION_TYPE)
        .equals(AuthType.NOSASL)) {
        return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(),
            String.format("Impersonation validation is skipped for NOSASL"), "");
    }
    ValidationTaskResult loadConfig = loadHdfsConfig();
    if (loadConfig.getState() != ValidationUtils.State.OK) {
        // Prefix the advice so the user knows why HDFS config is needed here.
        mAdvice.insert(0, "Validating the proxy user requires additional HDFS "
            + "configuration. ");
        return loadConfig.setAdvice(mAdvice.toString());
    }
    // TODO(jiacheng): validate proxyuser.hosts for the cluster
    // Validate proxyuser config for the current Alluxio user
    try {
        String alluxioUser = getCurrentUser();
        return validateProxyUsers(alluxioUser);
    } catch (UnauthenticatedException e) {
        mMsg.append(String.format("Failed to authenticate in Alluxio: "));
        mMsg.append(ExceptionUtils.asPlainText(e));
        mAdvice.append("Please fix the authentication issue.");
        return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
            mMsg.toString(), mAdvice.toString());
    }
}
@Test public void wildcardProxyUsers() { String userName = System.getProperty("user.name"); // Proxy users configured but not groups prepareHdfsConfFiles(ImmutableMap.of( String.format("hadoop.proxyuser.%s.users", userName), "*")); HdfsProxyUserValidationTask task = new HdfsProxyUserValidationTask("hdfs://namenode:9000/alluxio", mConf); ValidationTaskResult result = task.validateImpl(ImmutableMap.of()); assertEquals(ValidationUtils.State.OK, result.getState()); }
/**
 * Counts config-history rows created since {@code startTime}, using the SQL produced by
 * the dialect-specific history mapper for the current data source.
 *
 * @param startTime lower bound for the history entries' creation time
 * @return the number of matching history entries
 * @throws IllegalArgumentException if the query unexpectedly returns no count
 */
@Override
public int findConfigHistoryCountByTime(final Timestamp startTime) {
    HistoryConfigInfoMapper historyConfigInfoMapper = mapperManager.findMapper(
        dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.START_TIME, startTime);
    MapperResult mapperResult = historyConfigInfoMapper.findConfigHistoryCountByTime(context);
    // queryForObject can return null if the result cannot be mapped; treat that as an error.
    Integer result = jt.queryForObject(mapperResult.getSql(), mapperResult.getParamList().toArray(), Integer.class);
    if (result == null) {
        throw new IllegalArgumentException("findConfigHistoryCountByTime error");
    }
    return result;
}
// Covers both paths of findConfigHistoryCountByTime: a normal count and the
// null-result path that must raise "findConfigHistoryCountByTime error".
@Test
void testFindConfigHistoryCountByTime() {
    Timestamp timestamp = new Timestamp(System.currentTimeMillis());
    //mock count
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {timestamp}), eq(Integer.class))).thenReturn(308);
    //execute & verify
    int count = externalHistoryConfigInfoPersistService.findConfigHistoryCountByTime(timestamp);
    assertEquals(308, count);
    //mock count is null
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {timestamp}), eq(Integer.class))).thenReturn(null);
    //execute & verify
    try {
        externalHistoryConfigInfoPersistService.findConfigHistoryCountByTime(timestamp);
        assertTrue(false);
    } catch (Exception e) {
        assertEquals("findConfigHistoryCountByTime error", e.getMessage());
    }
}
/**
 * Returns all values of the given query parameter as an array.
 *
 * @param qs the parsed query string
 * @param key the parameter name to look up
 * @param isCaseSensitive whether the lookup should match the key case-sensitively
 * @return the parameter values, or {@code null} when the key is absent
 */
protected String[] getQueryParamValues(MultiValuedTreeMap<String, String> qs, String key, boolean isCaseSensitive) {
    final List<String> values = getQueryParamValuesAsList(qs, key, isCaseSensitive);
    return values == null ? null : values.toArray(new String[0]);
}
// Case-sensitive lookups must return all values for an exact-case key and null
// for a key that differs only in case.
@Test
void queryParamValues_getQueryParamValues() {
    AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(new AwsProxyRequest(), mockContext, null);
    MultiValuedTreeMap<String, String> map = new MultiValuedTreeMap<>();
    map.add("test", "test");
    map.add("test", "test2");
    String[] result1 = request.getQueryParamValues(map, "test", true);
    assertArrayEquals(new String[]{"test", "test2"}, result1);
    String[] result2 = request.getQueryParamValues(map, "TEST", true);
    assertNull(result2);
}
/**
 * Sends a pre-vote request to the given peer.
 *
 * <p>When the connection is healthy the request is dispatched with the election timeout
 * as its deadline; otherwise the failure path is executed on the RPC executor.
 *
 * @param endpoint the target peer
 * @param request the pre-vote request
 * @param done callback invoked with the response
 * @return a future for the raw response message
 */
@Override
public Future<Message> preVote(final Endpoint endpoint, final RequestVoteRequest request,
                               final RpcResponseClosure<RequestVoteResponse> done) {
    if (checkConnection(endpoint, true)) {
        return invokeWithDone(endpoint, request, done, this.nodeOptions.getElectionTimeoutMs());
    }
    return onConnectionFail(endpoint, request, done, this.rpcExecutor);
}
// Smoke test: a fully-populated pre-vote request can be sent without a callback
// (null closure) and without throwing.
@Test
public void testPreVote() {
    this.clientService.preVote(this.endpoint, RequestVoteRequest.newBuilder(). //
        setGroupId("test"). //
        setLastLogIndex(1). //
        setLastLogTerm(1). //
        setPeerId("localhost:1010"). //
        setTerm(1).setServerId("localhost:1011").setPreVote(true).build(), null);
}
/**
 * Creates a wrapper around the given jetcd client.
 *
 * @param client the underlying etcd client used for all operations
 */
public EtcdClient(final Client client) {
    this.client = client;
}
// Verifies that EtcdClient.close() does not throw, and that get() swallows an
// InterruptedException from the underlying future instead of propagating it.
@Test
public void testEtcdClient() throws ExecutionException, InterruptedException {
    final Client client = mock(Client.class);
    final EtcdClient etcdClient = new EtcdClient(client);
    assertDoesNotThrow(etcdClient::close);
    final KV mockKV = mock(KV.class);
    when(client.getKVClient()).thenReturn(mockKV);
    final CompletableFuture<GetResponse> future = mock(CompletableFuture.class);
    when(mockKV.get(any(ByteSequence.class))).thenReturn(future);
    doThrow(new InterruptedException()).when(future).get();
    assertDoesNotThrow(() -> etcdClient.get("key"));
}
/**
 * Derives the file id for a container: the block id built from the container id and the
 * maximum sequence number. The result is fully determined by {@code containerId}.
 *
 * @param containerId the container to derive a file id for
 * @return the file id; may be {@code INVALID_FILE_ID} (-1) for the last possible container
 */
public static long createFileId(long containerId) {
    long id = BlockId.createBlockId(containerId, BlockId.getMaxSequenceNumber());
    if (id == INVALID_FILE_ID) {
        // Right now, there's not much we can do if the file id we're returning is -1, since the file
        // id is completely determined by the container id passed in. However, by the current
        // algorithm, -1 will be the last file id generated, so the chances somebody will get to that
        // are slim. For now we just log it.
        LOG.warn("Created file id -1, which is invalid");
    }
    return id;
}
// A small container id must yield a valid (non -1) file id.
@Test
public void createFileId() throws Exception {
    long containerId = 1;
    long fileId = IdUtils.createFileId(containerId);
    assertNotEquals(-1, fileId);
}
/**
 * Builds a {@code MonitorConfig} populated from this builder's state.
 *
 * <p>Each invocation creates a fresh instance, so repeated calls return distinct
 * (but equally configured) objects.
 *
 * @return a newly constructed monitor configuration
 */
@Override
public MonitorConfig build() {
    MonitorConfig monitorConfig = new MonitorConfig();
    // Populate fields inherited from the abstract builder (e.g. id) first.
    super.build(monitorConfig);
    monitorConfig.setProtocol(protocol);
    monitorConfig.setAddress(address);
    monitorConfig.setUsername(username);
    monitorConfig.setPassword(password);
    monitorConfig.setGroup(group);
    monitorConfig.setVersion(version);
    monitorConfig.setInterval(interval);
    monitorConfig.setParameters(parameters);
    monitorConfig.setDefault(isDefault);
    return monitorConfig;
}
// Every builder property must be carried into the built config, and two build()
// calls must produce distinct instances.
@Test
void build() {
    MonitorBuilder builder = MonitorBuilder.newBuilder();
    builder.protocol("protocol")
        .address("address")
        .group("group")
        .interval("interval")
        .isDefault(true)
        .password("password")
        .username("username")
        .version("version")
        .appendParameter("default.num", "one")
        .id("id");
    MonitorConfig config = builder.build();
    MonitorConfig config2 = builder.build();
    Assertions.assertEquals("protocol", config.getProtocol());
    Assertions.assertEquals("address", config.getAddress());
    Assertions.assertEquals("group", config.getGroup());
    Assertions.assertEquals("interval", config.getInterval());
    Assertions.assertEquals("password", config.getPassword());
    Assertions.assertEquals("username", config.getUsername());
    Assertions.assertEquals("version", config.getVersion());
    Assertions.assertTrue(config.isDefault());
    Assertions.assertTrue(config.getParameters().containsKey("default.num"));
    Assertions.assertEquals("one", config.getParameters().get("default.num"));
    Assertions.assertEquals("id", config.getId());
    Assertions.assertNotSame(config, config2);
}
/**
 * Creates a backend and restores it from the first working alternative in the given
 * prioritized list of restore options. An empty list is treated as a single empty-state
 * (fresh) restore. Exceptions from failed alternatives are collected and suppressed into
 * the final failure if no alternative succeeds.
 *
 * @param restoreOptions ordered restore alternatives, primary first
 * @param stats collector receiving size stats for the state objects actually used
 * @return the successfully created and restored backend
 * @throws Exception if all alternatives fail, or the task was cancelled mid-restore
 */
@Nonnull
public T createAndRestore(
        @Nonnull List<? extends Collection<S>> restoreOptions,
        @Nonnull StateObject.StateObjectSizeStatsCollector stats)
        throws Exception {
    if (restoreOptions.isEmpty()) {
        restoreOptions = Collections.singletonList(Collections.emptyList());
    }
    int alternativeIdx = 0;
    Exception collectedException = null;
    while (alternativeIdx < restoreOptions.size()) {
        Collection<S> restoreState = restoreOptions.get(alternativeIdx);
        ++alternativeIdx;
        // IMPORTANT: please be careful when modifying the log statements because they are used
        // for validation in
        // the automatic end-to-end tests. Those tests might fail if they are not aligned with
        // the log message!
        if (restoreState.isEmpty()) {
            LOG.debug("Creating {} with empty state.", logDescription);
        } else {
            if (LOG.isTraceEnabled()) {
                LOG.trace(
                        "Creating {} and restoring with state {} from alternative ({}/{}).",
                        logDescription,
                        restoreState,
                        alternativeIdx,
                        restoreOptions.size());
            } else {
                LOG.debug(
                        "Creating {} and restoring with state from alternative ({}/{}).",
                        logDescription,
                        alternativeIdx,
                        restoreOptions.size());
            }
        }
        try {
            T successfullyRestored = attemptCreateAndRestore(restoreState);
            // Obtain and report stats for the state objects used in our successful restore
            restoreState.forEach(handle -> handle.collectSizeStats(stats));
            return successfullyRestored;
        } catch (Exception ex) {
            collectedException = ExceptionUtils.firstOrSuppressed(ex, collectedException);
            if (backendCloseableRegistry.isClosed()) {
                // The task was cancelled (registry closed): stop retrying immediately.
                throw new FlinkException(
                        "Stopping restore attempts for already cancelled task.",
                        collectedException);
            }
            LOG.warn(
                    "Exception while restoring {} from alternative ({}/{}), will retry while more "
                            + "alternatives are available.",
                    logDescription,
                    alternativeIdx,
                    restoreOptions.size(),
                    ex);
        }
    }
    throw new FlinkException(
            "Could not restore "
                    + logDescription
                    + " from any of the "
                    + restoreOptions.size()
                    + " provided restore options.",
            collectedException);
}
// Verifies that restore alternatives are attempted strictly in order: the first
// (failing) handle is tried, the second (valid snapshot) succeeds, and the third
// is never touched. Also checks the restored list state contents.
@Test
void testRestoreProcedureOrderAndFailure() throws Exception {
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    CheckpointStreamFactory checkpointStreamFactory = new MemCheckpointStreamFactory(1024);
    ListStateDescriptor<Integer> stateDescriptor = new ListStateDescriptor<>("test-state", Integer.class);
    // Take a real snapshot of a backend holding [0, 1, 2, 3] to use as the valid alternative.
    OperatorStateBackend originalBackend = backendSupplier.apply(Collections.emptyList());
    SnapshotResult<OperatorStateHandle> snapshotResult;
    try {
        ListState<Integer> listState = originalBackend.getListState(stateDescriptor);
        listState.add(0);
        listState.add(1);
        listState.add(2);
        listState.add(3);
        RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot =
            originalBackend.snapshot(0L, 0L, checkpointStreamFactory,
                CheckpointOptions.forCheckpointWithDefaultLocation());
        snapshot.run();
        snapshotResult = snapshot.get();
    } finally {
        originalBackend.close();
        originalBackend.dispose();
    }
    OperatorStateHandle firstFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle secondSuccessHandle = spy(snapshotResult.getJobManagerOwnedSnapshot());
    OperatorStateHandle thirdNotUsedHandle = mock(OperatorStateHandle.class);
    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions = Arrays.asList(
        new StateObjectCollection<>(Collections.singletonList(firstFailHandle)),
        new StateObjectCollection<>(Collections.singletonList(secondSuccessHandle)),
        new StateObjectCollection<>(Collections.singletonList(thirdNotUsedHandle)));
    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
        new BackendRestorerProcedure<>(backendSupplier, closeableRegistry, "test op state backend");
    OperatorStateBackend restoredBackend = restorerProcedure.createAndRestore(
        sortedRestoreOptions, StateObject.StateObjectSizeStatsCollector.create());
    assertThat(restoredBackend).isNotNull();
    try {
        // Attempt order: first failed, second succeeded, third never opened.
        verify(firstFailHandle).openInputStream();
        verify(secondSuccessHandle).openInputStream();
        verify(thirdNotUsedHandle, times(0)).openInputStream();
        ListState<Integer> listState = restoredBackend.getListState(stateDescriptor);
        Iterator<Integer> stateIterator = listState.get().iterator();
        assertThat(stateIterator.next()).isZero();
        assertThat(stateIterator.next()).isOne();
        assertThat(stateIterator.next()).isEqualTo(2);
        assertThat(stateIterator.next()).isEqualTo(3);
        assertThat(stateIterator).isExhausted();
    } finally {
        restoredBackend.close();
        restoredBackend.dispose();
    }
}
/**
 * Renders the permission bits in octal notation (e.g. "1400" when only the
 * sticky bit and owner-read are set).
 *
 * @return the octal string form of this permission's integer value
 */
public String getMode() {
    final int bits = toInteger();
    return Integer.toString(bits, 8);
}
// Sticky bit plus owner-read must render as octal "1400".
@Test
public void testModeStickyBit() {
    final Permission permission = new Permission(Permission.Action.read, Permission.Action.none,
        Permission.Action.none, true, false, false);
    assertEquals("1400", permission.getMode());
}
/**
 * Advances the auto-commit timer to the given wall-clock time.
 * No-op when auto-commit is disabled (state absent).
 *
 * @param currentTimeMs the current time in milliseconds
 */
public void updateAutoCommitTimer(final long currentTimeMs) {
    this.autoCommitState.ifPresent(state -> state.updateTimer(currentTimeMs));
}
// After an auto-commit interval elapses and the resulting commit completes,
// the interceptor invocation must be enqueued with the committed offsets.
@Test
public void testAutocommitInterceptorsInvoked() {
    TopicPartition t1p = new TopicPartition("topic1", 0);
    subscriptionState.assignFromUser(singleton(t1p));
    subscriptionState.seek(t1p, 100);
    CommitRequestManager commitRequestManager = create(true, 100);
    time.sleep(100);
    commitRequestManager.updateAutoCommitTimer(time.milliseconds());
    List<NetworkClientDelegate.FutureCompletionHandler> futures = assertPoll(1, commitRequestManager);
    // complete the unsent request to trigger interceptor
    futures.get(0).onComplete(buildOffsetCommitClientResponse(new OffsetCommitResponse(0, new HashMap<>())));
    verify(offsetCommitCallbackInvoker).enqueueInterceptorInvocation(
        eq(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)))
    );
}
public boolean initAndAddIssue(Issue issue) { DefaultInputComponent inputComponent = (DefaultInputComponent) issue.primaryLocation().inputComponent(); if (noSonar(inputComponent, issue)) { return false; } ActiveRule activeRule = activeRules.find(issue.ruleKey()); if (activeRule == null) { // rule does not exist or is not enabled -> ignore the issue return false; } ScannerReport.Issue rawIssue = createReportIssue(issue, inputComponent.scannerId(), activeRule.severity()); if (filters.accept(inputComponent, rawIssue)) { write(inputComponent.scannerId(), rawIssue); return true; } return false; }
// An accepted issue must be serialized to the report with all its attributes:
// overridden severity, quick-fix flag, rule description context, code variants,
// and overridden impacts.
@Test
public void add_issue_to_cache() {
    initModuleIssues();
    final String ruleDescriptionContextKey = "spring";
    DefaultIssue issue = new DefaultIssue(project)
        .at(new DefaultIssueLocation().on(file).at(file.selectLine(3)).message("Foo"))
        .forRule(JAVA_RULE_KEY)
        .overrideSeverity(org.sonar.api.batch.rule.Severity.CRITICAL)
        .setQuickFixAvailable(true)
        .setRuleDescriptionContextKey(ruleDescriptionContextKey)
        .setCodeVariants(List.of("variant1", "variant2"))
        .overrideImpact(MAINTAINABILITY, org.sonar.api.issue.impact.Severity.HIGH)
        .overrideImpact(RELIABILITY, org.sonar.api.issue.impact.Severity.LOW);
    when(filters.accept(any(InputComponent.class), any(ScannerReport.Issue.class))).thenReturn(true);
    boolean added = moduleIssues.initAndAddIssue(issue);
    assertThat(added).isTrue();
    ArgumentCaptor<ScannerReport.Issue> argument = ArgumentCaptor.forClass(ScannerReport.Issue.class);
    verify(reportPublisher.getWriter()).appendComponentIssue(eq(file.scannerId()), argument.capture());
    assertThat(argument.getValue().getSeverity()).isEqualTo(org.sonar.scanner.protocol.Constants.Severity.CRITICAL);
    assertThat(argument.getValue().getQuickFixAvailable()).isTrue();
    assertThat(argument.getValue().getRuleDescriptionContextKey()).isEqualTo(ruleDescriptionContextKey);
    assertThat(argument.getValue().getCodeVariantsList()).containsExactly("variant1", "variant2");
    ScannerReport.Impact impact1 = ScannerReport.Impact.newBuilder().setSoftwareQuality(MAINTAINABILITY.name()).setSeverity("HIGH").build();
    ScannerReport.Impact impact2 = ScannerReport.Impact.newBuilder().setSoftwareQuality(RELIABILITY.name()).setSeverity("LOW").build();
    assertThat(argument.getValue().getOverridenImpactsList()).containsExactly(impact1, impact2);
}
/**
 * Captures the enumerator state for a checkpoint: the current enumerator position,
 * the split assigner's state, and a snapshot of the enumeration history.
 *
 * @param checkpointId the id of the checkpoint being taken (unused here)
 * @return the serializable enumerator state
 */
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
    return new IcebergEnumeratorState(
        enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
}
// A planner configured to fail once must yield no splits on the first discovery
// cycle, then successfully discover the expected split on the retry.
@Test
public void testTransientPlanningErrorsWithSuccessfulRetry() throws Exception {
    TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
        new TestingSplitEnumeratorContext<>(4);
    ScanContext scanContext = ScanContext.builder()
        .streaming(true)
        .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
        .maxPlanningSnapshotCount(1)
        .maxAllowedPlanningFailures(2)
        .build();
    // Planner set up to fail the first planning attempt.
    ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 1);
    ContinuousIcebergEnumerator enumerator = createEnumerator(enumeratorContext, scanContext, splitPlanner);
    // Make one split available and trigger the periodic discovery
    List<IcebergSourceSplit> splits =
        SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
    splitPlanner.addSplits(splits);
    // Trigger a planning and check that no splits returned due to the planning error
    enumeratorContext.triggerAllActions();
    assertThat(enumerator.snapshotState(2).pendingSplits()).isEmpty();
    // Second scan planning should succeed and discover the expected splits
    enumeratorContext.triggerAllActions();
    Collection<IcebergSourceSplitState> pendingSplits = enumerator.snapshotState(3).pendingSplits();
    assertThat(pendingSplits).hasSize(1);
    IcebergSourceSplitState pendingSplit = pendingSplits.iterator().next();
    assertThat(pendingSplit.split().splitId()).isEqualTo(splits.get(0).splitId());
    assertThat(pendingSplit.status()).isEqualTo(IcebergSourceSplitStatus.UNASSIGNED);
}
/**
 * Runs the flow analysis and renders a human-readable report: one "Flow Rule" /
 * "Analysis" pair per analyzed flow entry.
 *
 * @return the formatted analysis report, starting with a blank line
 */
public String analysisOutput() {
    analyze();
    // Use a StringBuilder instead of repeated String '+=' in the loop, which was
    // accidentally O(n^2) in the number of flows.
    StringBuilder report = new StringBuilder("\n");
    for (FlowEntry flow : label.keySet()) {
        report.append("Flow Rule: ").append(flowEntryRepresentation(flow)).append("\n");
        report.append("Analysis: ").append(label.get(flow)).append("!\n\n");
    }
    return report.toString();
}
@Test @Ignore("This needs to be reworked to be more robust") public void basic() { flowRuleService = new MockFlowRuleService(); flowRuleService.applyFlowRules(genFlow("ATL-001", 110, 90)); flowRuleService.applyFlowRules(genFlow("ATL-001", 110, 100)); flowRuleService.applyFlowRules(genFlow("ATL-001", 110, 150)); flowRuleService.applyFlowRules(genFlow("ATL-002", 80, 70)); flowRuleService.applyFlowRules(genFlow("ATL-003", 120, 130)); flowRuleService.applyFlowRules(genFlow("ATL-004", 50)); flowRuleService.applyFlowRules(genFlow("ATL-005", 140, 10)); linkService.addLink("H00:00:00:00:00:0660", 160, "ATL-005", 140); linkService.addLink("ATL-005", 10, "ATL-004", 40); linkService.addLink("ATL-004", 50, "ATL-002", 80); linkService.addLink("ATL-002", 70, "ATL-001", 110); linkService.addLink("ATL-001", 150, "H00:00:00:00:00:0770", 170); linkService.addLink("ATL-001", 90, "ATL-004", 30); linkService.addLink("ATL-001", 100, "ATL-003", 120); linkService.addLink("ATL-003", 130, "ATL-005", 20); topologyService = new MockTopologyService(linkService.createdGraph); FlowAnalyzer flowAnalyzer = new FlowAnalyzer(); flowAnalyzer.flowRuleService = flowRuleService; flowAnalyzer.linkService = linkService; flowAnalyzer.topologyService = topologyService; String labels = flowAnalyzer.analysisOutput(); String correctOutput = "Flow Rule: Device: atl-005, [IN_PORT{port=140}], [OUTPUT{port=10}]\n" + "Analysis: Cleared!\n" + "\n" + "Flow Rule: Device: atl-003, [IN_PORT{port=120}], [OUTPUT{port=130}]\n" + "Analysis: Black Hole!\n" + "\n" + "Flow Rule: Device: atl-001, [IN_PORT{port=110}], [OUTPUT{port=90}]\n" + "Analysis: Cycle Critical Point!\n" + "\n" + "Flow Rule: Device: atl-004, [], [OUTPUT{port=50}]\n" + "Analysis: Cycle!\n" + "\n" + "Flow Rule: Device: atl-001, [IN_PORT{port=110}], [OUTPUT{port=150}]\n" + "Analysis: Cleared!\n" + "\n" + "Flow Rule: Device: atl-001, [IN_PORT{port=110}], [OUTPUT{port=100}]\n" + "Analysis: Black Hole!\n" + "\n" + "Flow Rule: Device: atl-002, [IN_PORT{port=80}], 
[OUTPUT{port=70}]\n" + "Analysis: Cycle!\n"; assertEquals("Wrong labels", new TreeSet(Arrays.asList(labels.replaceAll("\\s+", "").split("!"))), new TreeSet(Arrays.asList(correctOutput.replaceAll("\\s+", "").split("!")))); }
/**
 * Shuts down the commit executor, waiting up to {@code timeoutMs} milliseconds for
 * running tasks to finish. Timeouts and interruptions are handled quietly by the
 * shutdown utility rather than propagated.
 *
 * @param timeoutMs maximum time to wait for termination, in milliseconds
 */
public void close(long timeoutMs) {
    ThreadUtils.shutdownExecutorServiceQuietly(commitExecutorService, timeoutMs, TimeUnit.MILLISECONDS);
}
// close() must still shut down the executor and must not propagate an
// InterruptedException thrown while awaiting termination.
@Test
public void testCloseInterrupted() throws InterruptedException {
    long timeoutMs = 1000;
    // Termination interrupted
    when(executor.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS)).thenThrow(new InterruptedException());
    committer.close(timeoutMs);
    verify(executor).shutdown();
}
/**
 * Reads four bytes as an {@code int} and reinterprets the raw bit pattern as an
 * IEEE-754 single-precision float.
 *
 * @return the float at the current reader position
 */
@Override
public float readFloat() {
    final int bits = readInt();
    return Float.intBitsToFloat(bits);
}
// Reading a float from a released buffer must fail with IllegalReferenceCountException.
@Test
public void testReadFloatAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, new Executable() {
        @Override
        public void execute() {
            releasedBuffer().readFloat();
        }
    });
}
/**
 * Writes a lifecycle configuration to a B2 bucket. An empty configuration updates the
 * bucket with no lifecycle rules (clearing any existing rule); otherwise a single rule
 * with the configured expiration and transition ages (in days) is applied. The bucket
 * type is preserved via its current ACL in both cases.
 *
 * @param container the bucket to update
 * @param configuration the lifecycle settings; {@code LifecycleConfiguration.empty()} clears rules
 * @throws BackgroundException on B2 API or I/O failures
 */
@Override
public void setConfiguration(final Path container, final LifecycleConfiguration configuration) throws BackgroundException {
    try {
        if(LifecycleConfiguration.empty().equals(configuration)) {
            // No rules: update the bucket without passing any LifecycleRule.
            session.getClient().updateBucket(
                fileid.getVersionId(containerService.getContainer(container)),
                new B2BucketTypeFeature(session, fileid).toBucketType(container.attributes().getAcl())
            );
        }
        else {
            session.getClient().updateBucket(
                fileid.getVersionId(containerService.getContainer(container)),
                new B2BucketTypeFeature(session, fileid).toBucketType(container.attributes().getAcl()),
                // null expiration/transition fields are passed through as null to B2.
                new LifecycleRule(
                    null == configuration.getExpiration() ? null : configuration.getExpiration().longValue(),
                    null == configuration.getTransition() ? null : configuration.getTransition().longValue(),
                    StringUtils.EMPTY)
            );
        }
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService(fileid).map("Failure to write attributes of {0}", e, container);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Failure to write attributes of {0}", e, container);
    }
}
// Round-trip test against a live bucket: set a lifecycle config, read it back,
// then clear it and verify it reads back as empty. Cleans up the bucket afterwards.
@Test
public void testSetConfiguration() throws Exception {
    final Path bucket = new Path(new AlphanumericRandomStringService().random(),
        EnumSet.of(Path.Type.directory, Path.Type.volume));
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    new B2DirectoryFeature(session, fileid).mkdir(bucket, new TransferStatus());
    assertEquals(LifecycleConfiguration.empty(), new B2LifecycleFeature(session, fileid).getConfiguration(bucket));
    new B2LifecycleFeature(session, fileid).setConfiguration(bucket, new LifecycleConfiguration(1, 30));
    final LifecycleConfiguration configuration = new B2LifecycleFeature(session, fileid).getConfiguration(bucket);
    assertEquals(30, configuration.getExpiration(), 0L);
    assertEquals(1, configuration.getTransition(), 0L);
    new B2LifecycleFeature(session, fileid).setConfiguration(bucket, LifecycleConfiguration.empty());
    assertEquals(LifecycleConfiguration.empty(), new B2LifecycleFeature(session, fileid).getConfiguration(bucket));
    new B2DeleteFeature(session, fileid).delete(Collections.singletonList(bucket),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Rounds a {@code long} value. Integral values are already whole numbers, so this
 * overload is the identity function (boxed for the UDF framework).
 *
 * @param val the value to round
 * @return {@code val} unchanged
 */
@Udf
public Long round(@UdfParameter final long val) {
    return val;
}
// All round() overloads must pass null through unchanged.
// Method name typo fixed: "shoulldHandleNullValues" -> "shouldHandleNullValues"
// (test methods are discovered via @Test, so renaming is safe).
@Test
public void shouldHandleNullValues() {
    assertThat(udf.round((Double)null), is((Long)null));
    assertThat(udf.round((BigDecimal) null), is((BigDecimal) null));
    assertThat(udf.round((Double)null, 2), is((Long)null));
    assertThat(udf.round((BigDecimal) null, 2), is((BigDecimal) null));
}
/**
 * Installs the given partitioner on the underlying channels.
 *
 * @param dataPartitioner strategy that maps a datum to a channel
 * @return this carrier, for fluent chaining
 */
public DataCarrier setPartitioner(IDataPartitioner<T> dataPartitioner) {
    channels.setPartitioner(dataPartitioner);
    return this;
}
// Verifies DataCarrier wiring: channel count, buffer size, buffer strategy, the
// default partitioner, and that setPartitioner swaps the partitioner in place.
@Test
public void testCreateDataCarrier() {
    DataCarrier<SampleData> carrier = new DataCarrier<>(5, 100, BufferStrategy.IF_POSSIBLE);
    Channels<SampleData> channels = Whitebox.getInternalState(carrier, "channels");
    assertEquals(5, channels.getChannelSize());
    QueueBuffer<SampleData> buffer = channels.getBuffer(0);
    assertEquals(100, buffer.getBufferSize());
    assertEquals(Whitebox.getInternalState(buffer, "strategy"), BufferStrategy.IF_POSSIBLE);
    assertEquals(Whitebox.getInternalState(buffer, "strategy"), BufferStrategy.IF_POSSIBLE);
    assertEquals(Whitebox.getInternalState(channels, "dataPartitioner").getClass(), SimpleRollingPartitioner.class);
    carrier.setPartitioner(new ProducerThreadPartitioner<>());
    assertEquals(Whitebox.getInternalState(channels, "dataPartitioner").getClass(), ProducerThreadPartitioner.class);
}
/**
 * Adds a single key/value pair to this builder's parameter map.
 *
 * @param key parameter name
 * @param value parameter value
 * @return this builder (concrete subtype), for fluent chaining
 */
public B appendParameter(String key, String value) {
    // Delegate to the static helper, which lazily creates the map when needed.
    parameters = appendParameter(parameters, key, value);
    return getThis();
}
// Chained appendParameter calls must accumulate both entries in the built config.
@Test
void appendParameter() {
    MethodBuilder builder = new MethodBuilder();
    builder.appendParameter("default.num", "one").appendParameter("num", "ONE");
    Map<String, String> parameters = builder.build().getParameters();
    Assertions.assertTrue(parameters.containsKey("default.num"));
    Assertions.assertEquals("ONE", parameters.get("num"));
}
/**
 * Builds a signed SAML ArtifactResponse answering the given ArtifactResolve request.
 * The response carries a SUCCESS status, echoes the resolve request's ID via
 * InResponseTo, embeds the built inner Response message, and is signed with the
 * requested sign type.
 *
 * @param artifactResolveRequest the incoming artifact resolve request being answered
 * @param entityId the issuer entity id to place on the response
 * @param signType which signing configuration to use
 * @return the assembled and signed artifact response
 * @throws InstantiationException if a SAML object cannot be created
 * @throws ValidationException if the built message fails validation
 * @throws ArtifactBuildException if response assembly fails
 * @throws BvdException if the BVD interaction fails
 */
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType)
        throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
    final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
    final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
    final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
    final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
    return ArtifactResponseBuilder
        .newInstance(artifactResponse)
        .addID()
        .addIssueInstant()
        .addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
        .addStatus(StatusBuilder
            .newInstance(status)
            .addStatusCode(statusCode, StatusCode.SUCCESS)
            .build())
        .addIssuer(issuer, entityId)
        .addMessage(buildResponse(artifactResolveRequest, entityId, signType))
        .addSignature(signatureService, signType)
        .build();
}
// CombiConnect flow with BVD: the response must be built from BVD affirmations and
// metadata, and the audience restriction URI must be absent on the resulting assertion.
@Test
void validateBVDBuildConditionsCombiConnectFlow() throws ValidationException, SamlParseException,
        ArtifactBuildException, BvdException, InstantiationException, MetadataException, JsonProcessingException {
    when(bvdClientMock.retrieveRepresentationAffirmations(anyString())).thenReturn(getBvdResponse());
    when(bvdMetadataServiceMock.generateMetadata()).thenReturn(getEntityDescriptor(BVD_ENTITY_ID));
    ArtifactResolveRequest artifactResolveRequest =
        getArtifactResolveRequest("success", true, true, SAML_COMBICONNECT, EncryptionType.BSN, BVD_ENTITY_ID);
    artifactResolveRequest.getSamlSession().setRequesterId(BVD_ENTITY_ID);
    ArtifactResponse artifactResponse =
        artifactResponseService.buildArtifactResponse(artifactResolveRequest, BVD_ENTITY_ID, BVD);
    Response response = (Response) artifactResponse.getMessage();
    verify(bvdClientMock, times(1)).retrieveRepresentationAffirmations(anyString());
    assertNull(response.getAssertions().get(0).getConditions().getAudienceRestrictions().get(0).getAudiences().get(0).getURI());
    verify(bvdMetadataServiceMock, times(1)).generateMetadata();
}
/**
 * Maps a Java type descriptor to a Beam schema {@code FieldType}.
 *
 * <p>Branch order matters: arrays/Collections are checked before Iterable because
 * {@code Collection} is a subtype of {@code Iterable}; Map and Iterable are handled
 * before falling back to the primitive mapping.
 *
 * @param typeDescriptor the Java type to convert
 * @return the corresponding schema field type
 * @throws IllegalArgumentException for {@code Row} types (the schema cannot be inferred)
 * @throws RuntimeException if no mapping exists for the type
 */
public static FieldType fieldTypeForJavaType(TypeDescriptor typeDescriptor) {
    // TODO: Convert for registered logical types.
    if (typeDescriptor.isArray()
        || typeDescriptor.isSubtypeOf(TypeDescriptor.of(Collection.class))) {
        return getArrayFieldType(typeDescriptor);
    } else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Map.class))) {
        return getMapFieldType(typeDescriptor);
    } else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Iterable.class))) {
        return getIterableFieldType(typeDescriptor);
    } else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Row.class))) {
        throw new IllegalArgumentException(
            "Cannot automatically determine a field type from a Row class"
                + " as we cannot determine the schema. You should set a field type explicitly.");
    } else {
        // Fall back to the primitive bidirectional mapping (e.g. Long -> INT64).
        TypeName typeName = PRIMITIVE_MAPPING.inverse().get(typeDescriptor);
        if (typeName == null) {
            throw new RuntimeException("Couldn't find field type for " + typeDescriptor);
        }
        return FieldType.of(typeName);
    }
}
// Each primitive Java type descriptor must map to its corresponding schema FieldType.
@Test
public void testPrimitiveTypeToFieldType() {
    assertEquals(FieldType.BYTE, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.bytes()));
    assertEquals(FieldType.INT16, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.shorts()));
    assertEquals(FieldType.INT32, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.integers()));
    assertEquals(FieldType.INT64, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.longs()));
    assertEquals(FieldType.DECIMAL, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.bigdecimals()));
    assertEquals(FieldType.FLOAT, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.floats()));
    assertEquals(FieldType.DOUBLE, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.doubles()));
    assertEquals(FieldType.STRING, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.strings()));
    assertEquals(FieldType.DATETIME, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptor.of(Instant.class)));
    assertEquals(FieldType.BOOLEAN, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.booleans()));
    assertEquals(FieldType.BYTES, FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptor.of(byte[].class)));
}
/**
 * Translates a raw HTTP error response into a {@code BackgroundException} via an
 * intermediate {@code S3ServiceException}. MinIO-specific error code/description
 * headers, when present, are copied onto the exception.
 *
 * @param response the HTTP response to map
 * @return the mapped background exception
 * @throws IOException if the response entity cannot be read
 */
public BackgroundException map(HttpResponse response) throws IOException {
    final S3ServiceException failure;
    if(null == response.getEntity()) {
        // No body: only the status line's reason phrase is available.
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase());
    }
    else {
        // Buffer the entity first so the body can be read as the error text without
        // exhausting a non-repeatable stream.
        EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase(),
            EntityUtils.toString(response.getEntity()));
    }
    failure.setResponseCode(response.getStatusLine().getStatusCode());
    // MinIO reports structured error details in custom response headers.
    if(response.containsHeader(MINIO_ERROR_CODE)) {
        failure.setErrorCode(response.getFirstHeader(MINIO_ERROR_CODE).getValue());
    }
    if(response.containsHeader(MINIO_ERROR_DESCRIPTION)) {
        failure.setErrorMessage(response.getFirstHeader(MINIO_ERROR_DESCRIPTION).getValue());
    }
    return this.map(failure);
}
// A custom prefix must become the mapped exception's message, with the service
// exception's text folded into the detail.
@Test
public void testCustomMessage() {
    assertEquals("Custom.",
        new S3ExceptionMappingService().map("custom", new ServiceException("message")).getMessage());
    assertEquals("Message. Please contact your web hosting service provider for assistance.",
        new S3ExceptionMappingService().map("custom", new ServiceException("message")).getDetail());
}
/**
 * Polls the letter-registration status for the session's account and maps the GBA
 * status onto an app response: PENDING while the request is in flight, NOK for
 * deceased/emigrated-RNI/error/invalid statuses (with remote audit logging), and OK
 * once the status is {@code valid_app_extension}, in which case the authenticator is
 * stamped with the request time and issuer type.
 *
 * <p>NOTE(review): the remote-log codes ("559", "558", "156", "905") and status
 * strings are an external contract — do not change them casually.
 *
 * @param flow the current flow; its name selects re-apply vs. first-apply polling
 * @param body the incoming app request (unused beyond routing)
 * @return a status/ok/nok response reflecting the registration state
 */
@Override
public AppResponse process(Flow flow, AppRequest body) {
    Map<String, String> registration = digidClient.pollLetter(appSession.getAccountId(),
        appSession.getRegistrationId(), flow.getName().equals(ReApplyActivateActivationCode.NAME));
    if (registration.get(lowerUnderscore(GBA_STATUS)).equals("request")) {
        // Still being processed: keep the poll loop alive.
        setValid(false);
        return new StatusResponse("PENDING");
    } else if (registration.get(lowerUnderscore(GBA_STATUS)).equals("deceased")){
        digidClient.remoteLog("559", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
            lowerUnderscore(HIDDEN), true));
        return new NokResponse("gba_deceased");
    } else if (GBA_EMIGATED_RNI.contains(registration.get(lowerUnderscore(GBA_STATUS)))) {
        digidClient.remoteLog("558", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
            lowerUnderscore(HIDDEN), true));
        return new NokResponse("gba_emigrated_RNI");
    } else if (registration.get(lowerUnderscore(GBA_STATUS)).equals("error")){
        return new NokResponse("error");
    } else if (!registration.get(lowerUnderscore(GBA_STATUS)).equals("valid_app_extension")){
        // Any other status is treated as an invalid GBA result.
        digidClient.remoteLog("558", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
            lowerUnderscore(HIDDEN), true));
        return new NokResponse("gba_invalid");
    }
    digidClient.remoteLog("156", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
        "device_name", appAuthenticator.getDeviceName(),
        lowerUnderscore(HIDDEN), true));
    appAuthenticator.setRequestedAt(ZonedDateTime.now());
    appAuthenticator.setIssuerType(registration.get(lowerUnderscore(ISSUER_TYPE)));
    digidClient.remoteLog("905", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
        lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
        lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
    return new OkResponse();
}
// A "deceased" GBA status must produce remote-log code 559 and a NOK response
// with error "gba_deceased".
@Test
void processStatusDeceased(){
    when(digidClientMock.pollLetter(mockedAppSession.getAccountId(), mockedAppSession.getRegistrationId(), false))
        .thenReturn(gbaStatusResponseDeceased);
    AppResponse appResponse = letterPolling.process(mockedFlow, mockedAbstractAppRequest);
    verify(digidClientMock, times(1)).remoteLog("559",
        Map.of(lowerUnderscore(ACCOUNT_ID), TEST_ACCOUNT_ID, "hidden", true));
    assertTrue(appResponse instanceof NokResponse);
    assertEquals("gba_deceased", ((NokResponse)appResponse).getError());
}