focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Encrypts a filename, memoizing the result per (encoding, cleartext, associatedData)
 * key and pre-seeding the reverse (decrypt) cache with the produced ciphertext so a
 * later decrypt of this name never reaches the underlying implementation.
 */
public String encryptFilename(final BaseEncoding encoding, final String cleartextName, final byte[] associatedData) {
    final CacheKey key = new CacheKey(encoding, cleartextName, associatedData);
    // NOTE(review): contains()+get() is a double lookup; if this cache can evict
    // entries between the two calls the get() could miss — confirm the cache contract.
    if(encryptCache.contains(key)) {
        return encryptCache.get(key);
    }
    final String ciphertextName = impl.encryptFilename(encoding, cleartextName, associatedData);
    encryptCache.put(key, ciphertextName);
    // Seed the decrypt cache with the inverse mapping.
    decryptCache.put(new CacheKey(encoding, ciphertextName, associatedData), cleartextName);
    return ciphertextName;
}
/**
 * Verifies encryptFilename caching: repeated calls with identical arguments hit the
 * cache (mock invoked once), decryption of a cached result uses the reverse cache,
 * and changing any of encoding / cleartext / associated data causes a cache miss.
 */
@Test
public void TestEncryptFilename() {
    final FileNameCryptor mock = mock(FileNameCryptor.class);
    final CryptorCache cryptor = new CryptorCache(mock);
    when(mock.encryptFilename(any(), any(), any())).thenReturn(RandomStringUtils.randomAscii(10));
    final String encrypted1 = cryptor.encryptFilename(CryptorCache.BASE32, "first", "id1".getBytes());
    verify(mock, times(1)).encryptFilename(any(), any(), any());
    // Same arguments again: must be served from the cache, no second mock call.
    assertEquals(encrypted1, cryptor.encryptFilename(CryptorCache.BASE32, "first", "id1".getBytes()));
    verify(mock, times(1)).encryptFilename(any(), any(), any());
    // ensure using reverse cache from encryption
    assertEquals("first", cryptor.decryptFilename(CryptorCache.BASE32, encrypted1, "id1".getBytes()));
    verify(mock, times(1)).encryptFilename(any(), any(), any());
    verifyNoMoreInteractions(mock);
    // cache miss on encoding
    cryptor.encryptFilename(BaseEncoding.base64Url(), "first", "id1".getBytes());
    verify(mock, times(2)).encryptFilename(any(), any(), any());
    // cache miss on cleartext
    cryptor.encryptFilename(CryptorCache.BASE32, "second", "id1".getBytes());
    verify(mock, times(3)).encryptFilename(any(), any(), any());
    // cache miss on byte[]
    cryptor.encryptFilename(CryptorCache.BASE32, "first", "id2".getBytes());
    verify(mock, times(4)).encryptFilename(any(), any(), any());
}
/**
 * Returns a Validator that accepts only a String holding a map whose keys parse
 * as ints and whose values parse as doubles; any violation raises ConfigException.
 */
public static Validator mapWithIntKeyDoubleValue() {
    return (name, val) -> {
        if (!(val instanceof String)) {
            throw new ConfigException(name, val, "Must be a string");
        }
        final Map<String, String> parsed = KsqlConfig.parseStringAsMap(name, (String) val);
        for (final Map.Entry<String, String> entry : parsed.entrySet()) {
            // Each key must be an integer...
            try {
                Integer.parseInt(entry.getKey());
            } catch (final NumberFormatException e) {
                throw new ConfigException(name, entry.getKey(), "Not an int");
            }
            // ...and each value a double.
            try {
                Double.parseDouble(entry.getValue());
            } catch (final NumberFormatException e) {
                throw new ConfigException(name, entry.getValue(), "Not a double");
            }
        }
    };
}
/**
 * Verifies the int-key/double-value map validator rejects a non-double value
 * ("1:abc") and a non-int key ("abc:1.2") with the expected messages.
 */
@Test
public void shouldThrowOnBadIntDoubleValueInMap() {
    // Given:
    final Validator validator = ConfigValidators.mapWithIntKeyDoubleValue();

    // When:
    final Exception e = assertThrows(
        ConfigException.class,
        () -> validator.ensureValid("propName", "1:abc")
    );
    final Exception e2 = assertThrows(
        ConfigException.class,
        () -> validator.ensureValid("propName", "abc:1.2")
    );

    // Then:
    assertThat(e.getMessage(), containsString("Invalid value abc for configuration propName: Not a double"));
    assertThat(e2.getMessage(), containsString("Invalid value abc for configuration propName: Not an int"));
}
// Synchronously reads the counter value by blocking on the async counter's result
// via complete() (which presumably translates async failures — TODO confirm).
@Override
public long get() {
    return complete(asyncCounter.get());
}
// Expects the blocking get() to surface an async timeout as StorageException.Timeout.
@Test(expected = StorageException.Timeout.class)
public void testTimeout() {
    AtomicCounterWithErrors atomicCounter = new AtomicCounterWithErrors();
    // Force the underlying async counter to complete with a timeout.
    atomicCounter.setErrorState(TestingCompletableFutures.ErrorState.TIMEOUT_EXCEPTION);
    DefaultAtomicCounter counter = new DefaultAtomicCounter(atomicCounter, 1000);
    counter.get();
}
// Returns the number of columns (the stored dimension n).
@Override
public int ncol() {
    return n;
}
/**
 * Verifies the fixture matrix reports the expected column count.
 * Removed the stray {@code System.out.println("ncol")} debug line — tests
 * should not write progress markers to stdout.
 */
@Test
public void testNcols() {
    assertEquals(3, matrix.ncol());
}
// Builds the REST path for listing views in a namespace:
// "v1/<prefix>/namespaces/<encoded-namespace>/views". The namespace is encoded
// via RESTUtil so multipart namespaces are safe to embed in a URL path.
public String views(Namespace ns) {
    return SLASH.join("v1", prefix, "namespaces", RESTUtil.encodeNamespace(ns), "views");
}
// A two-part namespace is joined with the 0x1F unit separator, which must appear
// percent-encoded ("%1F") in the generated path, with and without a prefix.
@Test
public void viewsWithMultipartNamespace() {
    Namespace ns = Namespace.of("n", "s");
    assertThat(withPrefix.views(ns)).isEqualTo("v1/ws/catalog/namespaces/n%1Fs/views");
    assertThat(withoutPrefix.views(ns)).isEqualTo("v1/namespaces/n%1Fs/views");
}
// Writes the IEEE 754 bit pattern of the float; byte order follows whatever
// this stream's writeInt uses.
@Override
public void writeFloat(final float v) throws IOException {
    writeInt(Float.floatToIntBits(v));
}
/**
 * Writes a float little-endian and checks the raw buffer holds its bit pattern.
 * Fix: JUnit's {@code assertEquals} takes (expected, actual); the original had
 * the arguments swapped, which yields a misleading message on failure.
 */
@Test
public void testWriteFloatForVByteOrder() throws Exception {
    float v = 1.1f;
    out.writeFloat(v, LITTLE_ENDIAN);
    int expected = Float.floatToIntBits(v);
    int actual = Bits.readIntL(out.buffer, 0);
    assertEquals(expected, actual);
}
/**
 * Decides whether to trigger a new materialization of changelog state.
 * Returns empty when the previous materialization is still in flight (neither
 * confirmed nor failed) or when no changelog has been appended since the last
 * materialized sequence number.
 */
@Override
public Optional<MaterializationRunnable> initMaterialization() throws Exception {
    if (lastConfirmedMaterializationId < materializedId - 1
            && lastFailedMaterializationId < materializedId - 1) {
        // SharedStateRegistry potentially requires that the checkpoint's dependency on the
        // shared file be continuous, it will be broken if we trigger a new materialization
        // before the previous one has either confirmed or failed. See discussion in
        // https://github.com/apache/flink/pull/22669#issuecomment-1593370772 .
        LOG.info(
                "materialization:{} not confirmed or failed or cancelled, skip trigger new one.",
                materializedId - 1);
        return Optional.empty();
    }
    SequenceNumber upTo = stateChangelogWriter.nextSequenceNumber();
    SequenceNumber lastMaterializedTo = changelogSnapshotState.lastMaterializedTo();
    LOG.info(
            "Initialize Materialization. Current changelog writers last append to sequence number {}",
            upTo);
    if (upTo.compareTo(lastMaterializedTo) > 0) {
        LOG.info("Starting materialization from {} : {}", lastMaterializedTo, upTo);
        // This ID is not needed for materialization; But since we are re-using the
        // streamFactory that is designed for state backend snapshot, which requires unique
        // checkpoint ID. A faked materialized Id is provided here.
        long materializationID = materializedId++;
        MaterializationRunnable materializationRunnable =
                new MaterializationRunnable(
                        keyedStateBackend.snapshot(
                                materializationID,
                                System.currentTimeMillis(),
                                // TODO: implement its own streamFactory.
                                streamFactory,
                                CHECKPOINT_OPTIONS),
                        materializationID,
                        upTo);
        // log metadata after materialization is triggered
        changelogStateFactory.resetAllWritingMetaFlags();
        return Optional.of(materializationRunnable);
    } else {
        LOG.debug(
                "Skip materialization, last materialized to {} : last log to {}",
                lastMaterializedTo,
                upTo);
        return Optional.empty();
    }
}
/**
 * Exercises the initMaterialization gating: a new materialization is only
 * triggered after the previous one has been confirmed or has failed, and only
 * when un-materialized changelog exists.
 */
@Test
public void testInitMaterialization() throws Exception {
    MockKeyedStateBackend<Integer> delegatedBackend = createMock();
    ChangelogKeyedStateBackend<Integer> backend = createChangelog(delegatedBackend);
    try {
        Optional<MaterializationRunnable> runnable;
        appendMockStateChange(backend); // ensure there is non-materialized changelog

        runnable = backend.initMaterialization(); // 1. should trigger first materialization
        assertTrue("first materialization should be trigger.", runnable.isPresent());

        appendMockStateChange(backend); // ensure there is non-materialized changelog
        // 2. should not trigger new one until the previous one has been confirmed or failed
        assertFalse(backend.initMaterialization().isPresent());

        backend.handleMaterializationFailureOrCancellation(
                runnable.get().getMaterializationID(), runnable.get().getMaterializedTo(), null);
        runnable = backend.initMaterialization(); // 3. should trigger new one after previous one failed
        assertTrue(runnable.isPresent());

        appendMockStateChange(backend); // ensure there is non-materialized changelog
        // 4. should not trigger new one until the previous one has been confirmed or failed
        assertFalse(backend.initMaterialization().isPresent());

        backend.handleMaterializationResult(
                SnapshotResult.empty(),
                runnable.get().getMaterializationID(),
                runnable.get().getMaterializedTo());
        checkpoint(backend, checkpointId).get().discardState();
        backend.notifyCheckpointComplete(checkpointId);
        // 5. should trigger new one after previous one has been confirmed
        assertTrue(backend.initMaterialization().isPresent());
    } finally {
        backend.close();
        backend.dispose();
    }
}
/**
 * Validates and persists a new Kubernetes namespace, then logs the creation.
 * Rejects a null namespace and a namespace whose metadata UID is null/empty.
 */
@Override
public void createNamespace(Namespace namespace) {
    checkNotNull(namespace, ERR_NULL_NAMESPACE);
    checkArgument(!Strings.isNullOrEmpty(namespace.getMetadata().getUid()), ERR_NULL_NAMESPACE_UID);
    k8sNamespaceStore.createNamespace(namespace);
    log.info(String.format(MSG_NAMESPACE, namespace.getMetadata().getName(), MSG_CREATED));
}
// Passing null must fail the checkNotNull precondition.
@Test(expected = NullPointerException.class)
public void testCreateNullNamespace() {
    target.createNamespace(null);
}
// Simple accessor for the map name this event refers to.
@Override
public String getMapName() {
    return mapName;
}
// The fixture event is built with map name "mapName"; the getter must echo it.
@Test
public void testGetMapName() {
    assertEquals("mapName", dataEvent.getMapName());
}
/**
 * Serializes this buffer value as:
 *   [record context][prior][old][new][endPadding spare bytes]
 * where each value slot is a 4-byte length followed by the bytes (addValue
 * presumably writes NULL_VALUE_SENTINEL for null — confirm against addValue).
 * The old value gets two special encodings: NULL_VALUE_SENTINEL when null, and
 * OLD_PREV_DUPLICATE_VALUE_SENTINEL when content-equal to the prior value, so
 * the duplicate bytes are not written twice.
 */
ByteBuffer serialize(final int endPadding) {
    final int sizeOfValueLength = Integer.BYTES;

    final int sizeOfPriorValue = priorValue == null ? 0 : priorValue.length;
    // Note: reference equality here (priorValue == oldValue) sizes the buffer for
    // the duplicate-sentinel case; the write below uses Arrays.equals, so the
    // allocation covers the reference-equal subset and equal-content copies still fit.
    final int sizeOfOldValue = oldValue == null || priorValue == oldValue ? 0 : oldValue.length;
    final int sizeOfNewValue = newValue == null ? 0 : newValue.length;

    final byte[] serializedContext = recordContext.serialize();

    final ByteBuffer buffer = ByteBuffer.allocate(
        serializedContext.length
            + sizeOfValueLength + sizeOfPriorValue
            + sizeOfValueLength + sizeOfOldValue
            + sizeOfValueLength + sizeOfNewValue
            + endPadding
    );
    buffer.put(serializedContext);
    addValue(buffer, priorValue);
    if (oldValue == null) {
        buffer.putInt(NULL_VALUE_SENTINEL);
    } else if (Arrays.equals(priorValue, oldValue)) {
        buffer.putInt(OLD_PREV_DUPLICATE_VALUE_SENTINEL);
    } else {
        buffer.putInt(sizeOfOldValue);
        buffer.put(oldValue);
    }
    addValue(buffer, newValue);
    return buffer;
}
/**
 * With only an old value set, the serialized payload after the record context is:
 * -1 (null prior), length 1 + the old byte, -1 (null new).
 */
@Test
public void shouldSerializeOld() {
    final ProcessorRecordContext context = new ProcessorRecordContext(0L, 0L, 0, "topic", new RecordHeaders());
    final byte[] serializedContext = context.serialize();
    final byte[] oldValue = {(byte) 5};
    final byte[] bytes = new BufferValue(null, oldValue, null, context).serialize(0).array();
    // Strip the record context prefix and compare the raw value encoding.
    final byte[] withoutContext = Arrays.copyOfRange(bytes, serializedContext.length, bytes.length);

    assertThat(withoutContext, is(ByteBuffer.allocate(Integer.BYTES * 3 + 1).putInt(-1).putInt(1).put(oldValue).putInt(-1).array()));
}
/**
 * Handles an OffsetCommit request: validates the group, keeps the classic-group
 * member session alive where applicable, then builds the response together with
 * the coordinator records that persist each accepted offset.
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            // Oversized metadata is rejected per partition, not for the whole request.
            if (isMetadataInvalid(partition.committedMetadata())) {
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());

                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));

                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );

                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    if (!records.isEmpty()) {
        // One sensor update covering all offsets committed by this request.
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
/**
 * Offset delete on a classic group: deleting an offset for an unknown topic
 * partition succeeds (NONE), while deleting one for a topic the group is still
 * subscribed to fails with GROUP_SUBSCRIBED_TO_TOPIC.
 */
@Test
public void testGenericGroupOffsetDeleteWithErrors() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );
    group.setSubscribedTopics(Optional.of(Collections.singleton("bar")));
    context.commitOffset("foo", "bar", 0, 100L, 0);

    // Delete the offset whose topic partition doesn't exist.
    context.testOffsetDeleteWith("foo", "bar1", 0, Errors.NONE);
    // Delete the offset from the topic that the group is subscribed to.
    context.testOffsetDeleteWith("foo", "bar", 0, Errors.GROUP_SUBSCRIBED_TO_TOPIC);
}
/**
 * Fetches a pluggable artifact via the artifact plugin: downloads the metadata
 * file, registers a console-log request processor for the duration of the fetch,
 * invokes the plugin, and applies any environment variables it returns.
 * The processor is always deregistered, even on failure.
 */
@Override
public void build(DefaultGoPublisher publisher, EnvironmentVariableContext environmentVariableContext, TaskExtension taskExtension, ArtifactExtension artifactExtension, PluginRequestProcessorRegistry pluginRequestProcessorRegistry, Charset consoleLogCharset) {
    downloadMetadataFile(publisher);
    try {
        // Route plugin console-log requests to this job's publisher while fetching.
        pluginRequestProcessorRegistry.registerProcessorFor(CONSOLE_LOG.requestName(), ArtifactRequestProcessor.forFetchArtifact(publisher, environmentVariableContext));
        final String message = format("[%s] Fetching pluggable artifact using plugin `%s`.", GoConstants.PRODUCT_NAME, artifactStore.getPluginId());
        LOGGER.info(message);
        publisher.taggedConsumeLine(TaggedStreamConsumer.OUT, message);

        List<FetchArtifactEnvironmentVariable> newEnvironmentVariables = artifactExtension.fetchArtifact(
            artifactStore.getPluginId(), artifactStore, configuration, getMetadataFromFile(artifactId), agentWorkingDirectory());
        updateEnvironmentVariableContextWith(publisher, environmentVariableContext, newEnvironmentVariables);
    } catch (Exception e) {
        publisher.taggedConsumeLine(TaggedStreamConsumer.ERR, e.getMessage());
        LOGGER.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } finally {
        // Always deregister so the processor does not leak into later tasks.
        pluginRequestProcessorRegistry.removeProcessorFor(CONSOLE_LOG.requestName());
    }
}
/**
 * Verifies the console-log request processor is registered before the plugin
 * fetch, the fetch receives the metadata read from disk, and the processor is
 * deregistered afterwards — in that exact order.
 */
@Test
public void shouldRegisterAndDeRegisterArtifactRequestProcessBeforeAndAfterPublishingPluggableArtifact() throws IOException {
    final FetchPluggableArtifactBuilder builder = new FetchPluggableArtifactBuilder(new RunIfConfigs(), new NullBuilder(), "", jobIdentifier, artifactStore, fetchPluggableArtifactTask.getConfiguration(), fetchPluggableArtifactTask.getArtifactId(), sourceOnServer, metadataDest.toFile(), checksumFileHandler);
    final Map<String, Object> metadata = Map.of("Version", "10.12.0");
    // Pre-write the metadata file the builder is expected to read.
    Files.writeString(metadataDest, new Gson().toJson(Map.of("artifactId", metadata)), UTF_8);

    builder.build(publisher, new EnvironmentVariableContext(), null, artifactExtension, registry, UTF_8);

    InOrder inOrder = inOrder(registry, artifactExtension);
    inOrder.verify(registry, times(1)).registerProcessorFor(eq(CONSOLE_LOG.requestName()), ArgumentMatchers.any(ArtifactRequestProcessor.class));
    inOrder.verify(artifactExtension).fetchArtifact(eq(PLUGIN_ID), eq(artifactStore), eq(fetchPluggableArtifactTask.getConfiguration()), eq(metadata), eq(metadataDest.getParent().toString()));
    inOrder.verify(registry, times(1)).removeProcessorFor(CONSOLE_LOG.requestName());
}
/**
 * Resolves a format by case-insensitive name.
 * Fix: use a locale-independent upper-casing. With the default locale (e.g.
 * Turkish, where 'i' upper-cases to a dotted capital İ), "json" would fail to
 * match the "JSON" constant.
 *
 * @throws KsqlException if the name matches no known format
 */
public static Format fromName(final String name) {
    switch (name.toUpperCase(java.util.Locale.ROOT)) {
        case AvroFormat.NAME: return AVRO;
        case JsonFormat.NAME: return JSON;
        case JsonSchemaFormat.NAME: return JSON_SR;
        case ProtobufFormat.NAME: return PROTOBUF;
        case ProtobufNoSRFormat.NAME: return PROTOBUF_NOSR;
        case KafkaFormat.NAME: return KAFKA;
        case DelimitedFormat.NAME: return DELIMITED;
        case NoneFormat.NAME: return NONE;
        default:
            throw new KsqlException("Unknown format: " + name);
    }
}
// Mixed-case names must resolve to the same format constant.
@Test
public void shouldCreateFromNameWithCaseInsensitivity() {
    // When:
    final Format format = FormatFactory.fromName("aVrO");

    // Then:
    assertThat(format, is(FormatFactory.AVRO));
}
/**
 * Reports whether this project produces a web archive: either the standard
 * "war" Maven packaging or the GWT application packaging.
 */
@Override
public boolean isWarProject() {
    final String packaging = project.getPackaging();
    if ("war".equals(packaging)) {
        return true;
    }
    return "gwt-app".equals(packaging);
}
// The fixture project's packaging is neither "war" nor "gwt-app".
@Test
public void testIsWarProject() {
    assertThat(mavenProjectProperties.isWarProject()).isFalse();
}
// Simple accessor for the HTTP method of this sampler/request.
public String getMethod(){
    return method;
}
/**
 * Parses several multipart/form-data POST requests into samplers and checks the
 * method, content encoding, multipart flag and both form arguments survive:
 * with CRLF line endings, with LF line endings, and with a value ("__VIEWSTATE"
 * style) that would change if it were URL-encoded.
 */
@Test
public void testPostMultipartFormData() throws Exception {
    String url = "http://localhost/matrix.html";
    // A HTTP POST request, multipart/form-data, simple values,
    String contentEncoding = "UTF-8";
    String boundary = "xf8SqlDNvmn6mFYwrioJaeUR2_Z4cLRXOSmB";
    String endOfLine = "\r\n";
    String titleValue = "mytitle";
    String descriptionValue = "mydescription";
    String postBody = createMultipartFormBody(titleValue, descriptionValue, contentEncoding, true, boundary, endOfLine);
    String testPostRequest = createMultipartFormRequest(url, postBody, contentEncoding, boundary, endOfLine);
    HTTPSamplerBase s = getSamplerForRequest(url, testPostRequest, contentEncoding);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals(contentEncoding, s.getContentEncoding());
    assertTrue(s.getDoMultipart());
    // Check arguments
    Arguments arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "title", titleValue, titleValue, contentEncoding, false);
    checkArgument((HTTPArgument)arguments.getArgument(1), "description", descriptionValue, descriptionValue, contentEncoding, false);

    // A HTTP POST request, multipart/form-data, simple values,
    // with \r\n as end of line, which is according to spec,
    // and with more headers in each multipart
    endOfLine = "\r\n";
    titleValue = "mytitle";
    descriptionValue = "mydescription";
    postBody = createMultipartFormBody(titleValue, descriptionValue, contentEncoding, true, boundary, endOfLine);
    testPostRequest = createMultipartFormRequest(url, postBody, contentEncoding, boundary, endOfLine);
    s = getSamplerForRequest(url, testPostRequest, contentEncoding);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals(contentEncoding, s.getContentEncoding());
    assertTrue(s.getDoMultipart());
    // Check arguments
    arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "title", titleValue, titleValue, contentEncoding, false);
    checkArgument((HTTPArgument)arguments.getArgument(1), "description", descriptionValue, descriptionValue, contentEncoding, false);

    // A HTTP POST request, multipart/form-data, simple values,
    // with \n as end of line, which should also be handled,
    // and with more headers in each multipart
    endOfLine = "\n";
    titleValue = "mytitle";
    descriptionValue = "mydescription";
    postBody = createMultipartFormBody(titleValue, descriptionValue, contentEncoding, true, boundary, endOfLine);
    testPostRequest = createMultipartFormRequest(url, postBody, contentEncoding, boundary, endOfLine);
    s = getSamplerForRequest(url, testPostRequest, contentEncoding);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals(contentEncoding, s.getContentEncoding());
    assertTrue(s.getDoMultipart());
    // Check arguments
    arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "title", titleValue, titleValue, contentEncoding, false);
    checkArgument((HTTPArgument)arguments.getArgument(1), "description", descriptionValue, descriptionValue, contentEncoding, false);

    // A HTTP POST request, multipart/form-data, with value that will change
    // if they are url encoded
    // Values are similar to __VIEWSTATE parameter that .net uses
    endOfLine = "\r\n";
    titleValue = "/wEPDwULLTE2MzM2OTA0NTYPZBYCAgMPZ/rA+8DZ2dnZ2dnZ2d/GNDar6OshPwdJc=";
    descriptionValue = "mydescription";
    postBody = createMultipartFormBody(titleValue, descriptionValue, contentEncoding, true, boundary, endOfLine);
    testPostRequest = createMultipartFormRequest(url, postBody, contentEncoding, boundary, endOfLine);
    s = getSamplerForRequest(url, testPostRequest, contentEncoding);
    assertEquals(HTTPConstants.POST, s.getMethod());
    assertEquals(contentEncoding, s.getContentEncoding());
    assertTrue(s.getDoMultipart());
    // Check arguments
    arguments = s.getArguments();
    assertEquals(2, arguments.getArgumentCount());
    checkArgument((HTTPArgument)arguments.getArgument(0), "title", titleValue, titleValue, contentEncoding, false);
    checkArgument((HTTPArgument)arguments.getArgument(1), "description", descriptionValue, descriptionValue, contentEncoding, false);
}
/**
 * Parses "&lt;profileUuid&gt;:&lt;repository&gt;:&lt;rule&gt;" into an ActiveRuleKey.
 * Requires at least two ':' separators; everything after the first ':' is the
 * rule key ("repository:rule") and is delegated to RuleKey.parse.
 */
public static ActiveRuleKey parse(String s) {
    final String[] parts = s.split(":");
    Preconditions.checkArgument(parts.length >= 3, "Bad format of activeRule key: " + s);
    final int firstColon = s.indexOf(':');
    final String ruleProfileUuid = s.substring(0, firstColon);
    final String remainder = s.substring(firstColon + 1);
    return new ActiveRuleKey(ruleProfileUuid, RuleKey.parse(remainder));
}
// "P1:xoo:R1" splits into profile uuid "P1" and rule key "xoo:R1".
@Test
void parse() {
    ActiveRuleKey key = ActiveRuleKey.parse("P1:xoo:R1");
    assertThat(key.getRuleProfileUuid()).isEqualTo("P1");
    assertThat(key.getRuleKey().repository()).isEqualTo("xoo");
    assertThat(key.getRuleKey().rule()).isEqualTo("R1");
}
/**
 * Returns a closeable iterator over the source lines of a FILE component read
 * from the analysis report. Fails fast when the component is null, is not a
 * file, or the report holds no source for it.
 */
@Override
public CloseableIterator<String> readLines(Component file) {
    requireNonNull(file, "Component should not be null");
    checkArgument(file.getType() == FILE, "Component '%s' is not a file", file);

    Optional<CloseableIterator<String>> linesIteratorOptional = reportReader.readFileSource(file.getReportAttributes().getRef());

    checkState(linesIteratorOptional.isPresent(), "File '%s' has no source code", file);
    CloseableIterator<String> lineIterator = linesIteratorOptional.get();

    // Wrap with the component's declared line count so the iterator can be bounded.
    return new ComponentLinesCloseableIterator(file, lineIterator, file.getFileAttributes().getLines());
}
// Lines put into the report reader must come back through readLines unchanged.
@Test
public void read_lines_from_report() {
    reportReader.putFileSourceLines(FILE_REF, "line1", "line2");

    assertThat(underTest.readLines(createComponent(2))).toIterable().containsOnly("line1", "line2");
}
/**
 * Returns the trail stroke color for the given element index by interpolating
 * each ARGB channel between the trail start and end colors; the interpolation
 * fraction advances by mTrailFraction per element.
 */
@ColorInt
public int strokeColorFor(int elementIndex) {
    final float fractionShift = elementIndex * mTrailFraction;
    final int r = shiftColor(Color.red(mTrailStartColor), Color.red(mTrailEndColor), fractionShift);
    final int g = shiftColor(Color.green(mTrailStartColor), Color.green(mTrailEndColor), fractionShift);
    final int b = shiftColor(Color.blue(mTrailStartColor), Color.blue(mTrailEndColor), fractionShift);
    final int a = shiftColor(Color.alpha(mTrailStartColor), Color.alpha(mTrailEndColor), fractionShift);
    return Color.argb(a, r, g, b);
}
// With 20 elements: index 0 is the start color, index 20 the end color,
// index 10 the per-channel midpoint.
@Test
public void testStrokeColorFor() {
    GestureTrailTheme underTest = new GestureTrailTheme(
        Color.argb(200, 60, 120, 240), Color.argb(100, 30, 240, 200), 100f, 20f, 20);
    Assert.assertEquals(Color.argb(200, 60, 120, 240), underTest.strokeColorFor(0));
    Assert.assertEquals(Color.argb(100, 30, 240, 200), underTest.strokeColorFor(20));
    Assert.assertEquals(Color.argb(150, 45, 180, 220), underTest.strokeColorFor(10));
}
// Converts dash-separated text to camelCase, delegating to the two-arg overload.
// The false flag presumably leaves the leading character un-capitalized
// (the tests show "hello-great" -> "helloGreat") — confirm against the overload.
public static String dashToCamelCase(final String text) {
    return dashToCamelCase(text, false);
}
// Covers null, empty, no-dash pass-through, already-camel pass-through,
// and one- and two-dash conversion.
@Test
public void testCapitalizeDash() {
    assertNull(StringHelper.dashToCamelCase(null));
    assertEquals("", StringHelper.dashToCamelCase(""));
    assertEquals("hello", StringHelper.dashToCamelCase("hello"));
    assertEquals("helloGreat", StringHelper.dashToCamelCase("helloGreat"));
    assertEquals("helloGreat", StringHelper.dashToCamelCase("hello-great"));
    assertEquals("helloGreatWorld", StringHelper.dashToCamelCase("hello-great-world"));
}
// Computes when the given timeout elapses, delegating to the millisecond overload.
@Override
public DateTime timeoutTime(Timeout timeout) {
    return timeoutTime(timeout.inMillis());
}
// Accepts actual == expected or actual after expected, since the clock advances
// between computing the expectation and invoking the clock under test.
@Test
public void shouldGiveTimeoutTime() throws Exception {
    DateTime expected = new DateTime().plusMillis((int) Timeout.NINETY_SECONDS.inMillis());
    DateTime actual = new SystemTimeClock().timeoutTime(Timeout.NINETY_SECONDS);
    assertThat(actual.equals(expected) || actual.isAfter(expected),is(true));
}
// Fluent builder setter for the keep-alive flag; returns this builder for chaining.
public ProtocolBuilder keepAlive(Boolean keepAlive) {
    this.keepAlive = keepAlive;
    return getThis();
}
// The flag set on the builder must survive into the built config.
@Test
void keepAlive() {
    ProtocolBuilder builder = new ProtocolBuilder();
    builder.keepAlive(true);
    Assertions.assertTrue(builder.build().getKeepAlive());
}
public Metadata() { super(); }
/**
 * End-to-end ZMTP metadata test: a ZAP handler authorizes a DEALER/DEALER
 * connection, then the received message's metadata is queried for built-in
 * properties (Socket-Type, User-Id, Peer-Address), a missing key, a
 * ZAP-provided property ("Hello") and the configured self-address property.
 */
@Test
public void testMetadata() throws IOException, InterruptedException {
    int port = Utils.findOpenPort();
    String host = "tcp://127.0.0.1:" + port;

    Ctx ctx = ZMQ.createContext();

    // Spawn ZAP handler
    // We create and bind ZAP socket in main thread to avoid case
    // where child thread does not start up fast enough.
    SocketBase handler = ZMQ.socket(ctx, ZMQ.ZMQ_REP);
    assertThat(handler, notNullValue());
    boolean rc = ZMQ.bind(handler, "inproc://zeromq.zap.01");
    assertThat(rc, is(true));

    Thread thread = new Thread(new ZapHandler(handler));
    thread.start();

    // Server socket will accept connections
    SocketBase server = ZMQ.socket(ctx, ZMQ.ZMQ_DEALER);
    assertThat(server, notNullValue());
    SocketBase client = ZMQ.socket(ctx, ZMQ.ZMQ_DEALER);
    assertThat(client, notNullValue());

    ZMQ.setSocketOption(server, ZMQ.ZMQ_ZAP_DOMAIN, "DOMAIN");
    ZMQ.setSocketOption(server, ZMQ.ZMQ_SELFADDR_PROPERTY_NAME, "X-Local-Address");
    rc = ZMQ.bind(server, host);
    assertThat(rc, is(true));
    rc = ZMQ.connect(client, host);
    assertThat(rc, is(true));

    int ret = ZMQ.send(client, "This is a message", 0);
    assertThat(ret, is(17));
    Msg msg = ZMQ.recv(server, 0);
    assertThat(msg, notNullValue());

    String prop = ZMQ.getMessageMetadata(msg, "Socket-Type");
    assertThat(prop, is("DEALER"));
    prop = ZMQ.getMessageMetadata(msg, "User-Id");
    assertThat(prop, is("anonymous"));
    prop = ZMQ.getMessageMetadata(msg, "Peer-Address");
    assertThat(prop.startsWith("127.0.0.1:"), is(true));
    // Unknown keys yield null rather than throwing.
    prop = ZMQ.getMessageMetadata(msg, "no such");
    assertThat(prop, nullValue());
    prop = ZMQ.getMessageMetadata(msg, "Hello");
    assertThat(prop, is("World"));
    prop = ZMQ.getMessageMetadata(msg, "X-Local-Address");
    assertThat(prop, is("127.0.0.1:" + port));

    ZMQ.closeZeroLinger(server);
    ZMQ.closeZeroLinger(client);

    // Shutdown
    ZMQ.term(ctx);

    // Wait until ZAP handler terminates
    thread.join();
}
/**
 * Updates a board on behalf of a member: loads the board with its images,
 * verifies the caller is the writer, applies the update (title, content,
 * added/removed images), then uploads new images and deletes removed ones.
 */
public void patchBoardById(
        final Long boardId,
        final Long memberId,
        final BoardUpdateRequest request
) {
    Board board = findBoardWithImages(boardId);
    // Only the original writer may modify the board.
    board.validateWriter(memberId);
    BoardUpdateResult result = board.update(request.title(), request.content(),
            request.addedImages(), request.deletedImages(), imageConverter);
    // Persist image changes in storage after the domain update succeeds.
    imageUploader.upload(result.addedImages(), request.addedImages());
    imageUploader.delete(result.deletedImages());
}
// A member other than the writer must not be able to update the board:
// patching with (writerId + 1) must raise WriterNotEqualsException.
@Test
void 게시글_주인이_다르면_수정하지_못한다() {
    // given
    Board savedBoard = boardRepository.save(게시글_생성_사진없음());
    BoardUpdateRequest req = new BoardUpdateRequest("수정", "수정", new ArrayList<>(), new ArrayList<>());

    // when & then
    assertThatThrownBy(() -> boardService.patchBoardById(savedBoard.getId(), savedBoard.getWriterId() + 1, req))
        .isInstanceOf(WriterNotEqualsException.class);
}
/**
 * Tests a VFS connection: checks the provider accepts the details, then, unless
 * root paths are unsupported or explicitly ignored, validates the resolved root
 * path (required vs. optional) and that it exists as a folder.
 *
 * @return true when the connection (and, where applicable, its root path) is valid
 */
public <T extends VFSConnectionDetails> boolean test(
    @NonNull ConnectionManager manager,
    @NonNull T details,
    @Nullable VFSConnectionTestOptions options ) throws KettleException {

    if ( options == null ) {
        options = new VFSConnectionTestOptions();
    }

    // The specified connection details may not exist saved in the meta-store,
    // but still needs to have a non-empty name in it, to be able to form a temporary PVFS URI.
    if ( StringUtils.isEmpty( details.getName() ) ) {
        return false;
    }

    VFSConnectionProvider<T> provider = getExistingProvider( manager, details );
    if ( !provider.test( details ) ) {
        return false;
    }

    // Root-path validation can be skipped entirely.
    if ( !details.isRootPathSupported() || options.isRootPathIgnored() ) {
        return true;
    }

    String resolvedRootPath;
    try {
        resolvedRootPath = getResolvedRootPath( details );
    } catch ( KettleException e ) {
        // Invalid root path.
        return false;
    }

    // No root path configured: valid only when a root path is not required.
    if ( resolvedRootPath == null ) {
        return !details.isRootPathRequired();
    }

    // Ensure that root path exists and is a folder.
    return isFolder( getConnectionRootProviderFileObject( manager, provider, details ) );
}
// A messy but resolvable root path (relative segments, mixed separators,
// duplicate slashes) must still validate successfully.
@Test
public void testTestReturnsTrueWithValidButNotNormalizedRootPath() throws KettleException {
    when( vfsConnectionDetails.getRootPath() ).thenReturn( "./sub-folder/\\../other-sub-folder//" );
    assertTrue( vfsConnectionManagerHelper.test( connectionManager, vfsConnectionDetails, getTestOptionsCheckRootPath() ) );
}
/**
 * Advances the paged iteration identified by cursorId. Re-sending the previous
 * cursor id replays the last page (client retry) without advancing; sending the
 * current cursor id fetches up to maxCount fresh items and issues a new cursor.
 * Any other id is rejected.
 *
 * @throws IllegalStateException if cursorId is neither the current nor previous cursor
 */
public IterationResult<T> iterate(@Nonnull UUID cursorId, int maxCount) {
    requireNonNull(cursorId);
    if (cursorId.equals(this.prevCursorId)) {
        access();
        // no progress, no need to forget a cursor id, so null
        return new IterationResult<>(this.page, this.cursorId, null);
    } else if (!cursorId.equals(this.cursorId)) {
        throw new IllegalStateException("The cursor id " + cursorId
            + " is not the current cursor id nor the previous cursor id.");
    }
    List<T> currentPage = new ArrayList<>(maxCount);
    while (currentPage.size() < maxCount && iterator.hasNext()) {
        currentPage.add(iterator.next());
    }
    // Rotate cursors: the old previous cursor can now be forgotten by the caller.
    UUID cursorIdToForget = this.prevCursorId;
    this.prevCursorId = this.cursorId;
    this.cursorId = UuidUtil.newUnsecureUUID();
    this.page = currentPage;
    access();
    return new IterationResult<>(this.page, this.cursorId, cursorIdToForget);
}
// The very first iteration has no previous cursor, so nothing is forgotten.
@Test
public void testIterateWithInitialCursorId() {
    int pageSize = 100;
    IterationResult<Integer> result = iterator.iterate(initialCursorId, pageSize);
    // the first iteration does not forget anything
    assertIterationResult(result, initialCursorId, null, 0, pageSize);
}
/**
 * Collects the errors of every node result into one flat list, each entry
 * prefixed with the reporting node's id.
 * NOTE(review): the format "%s :%s" produces "id :err" — the space before the
 * colon looks unintentional; confirm before changing, callers may match on it.
 */
public List<String> collectErrorsFromAllNodes() {
    List<String> errors = new ArrayList<>();
    for (T node : mNodeResults.values()) {
        // add all the errors for this node, with the node appended to prefix
        for (String err : node.getErrors()) {
            errors.add(String.format("%s :%s", node.getBaseParameters().mId, err));
        }
    }
    return errors;
}
// A node result without errors must yield an empty collected list, not null.
@Test
public void collectErrorFromAllNodesWithNoErrors() {
    // test summary with node and no error
    TestMultipleNodeSummary summary = new TestMultipleNodeSummary();
    summary.addTaskResultWithoutErrors(6);
    List<String> emptyList = summary.collectErrorsFromAllNodes();
    assertTrue(emptyList.isEmpty());
}
/**
 * Reports whether this URL matches the given URL: either by literal string
 * equality, or part-by-part where both URLs have the same number of path parts
 * and every corresponding pair matches.
 */
public boolean matches(String matchUrl) {
    // Fast path: exact string match.
    if (url.equals(matchUrl)) {
        return true;
    }
    final Iterator<UrlPathPart> candidateParts = new MatchUrl(matchUrl).pathParts.iterator();
    final Iterator<UrlPathPart> ownParts = pathParts.iterator();
    while (candidateParts.hasNext() && ownParts.hasNext()) {
        if (!candidateParts.next().matches(ownParts.next())) {
            return false;
        }
    }
    // Both iterators must be exhausted: differing part counts never match.
    return !candidateParts.hasNext() && !ownParts.hasNext();
}
// Identical literal URLs must match via the equality fast path.
@Test
void testMatch() {
    boolean matches = new MatchUrl("/api/jobs").matches("/api/jobs");

    assertThat(matches).isTrue();
}
// Lets the service allocate its ports through a PortAllocBridge anchored at
// wantedPort, and returns the ports the bridge recorded.
List<Integer> allocatePorts(NetworkPortRequestor service, int wantedPort) {
    PortAllocBridge allocator = new PortAllocBridge(this, service);
    service.allocatePorts(wantedPort, allocator);
    return allocator.result();
}
// Requesting a port just above BASE_PORT + MAX_PORTS must be accepted (no throw).
@Test
void port_above_vespas_port_range_can_be_reserved() {
    HostPorts host = new HostPorts("myhostname");
    MockRoot root = new MockRoot();
    host.allocatePorts(new TestService(root, 1), HostPorts.BASE_PORT + HostPorts.MAX_PORTS + 1);
}
// Loads an existing secret store using the shared doIt dispatcher in LOAD mode.
public SecretStore load(SecureConfig secureConfig) {
    return doIt(MODE.LOAD, secureConfig);
}
// Loading with an empty SecureConfig (no path configured) must raise LoadException.
@Test
public void testErrorLoading() {
    assertThrows(SecretStoreException.LoadException.class, () -> {
        //default implementation requires a path
        secretStoreFactory.load(new SecureConfig());
    });
}
/**
 * Describes topics, dispatching on how the collection identifies them:
 * by topic id or by topic name. Any other TopicCollection subtype is rejected.
 */
@Override
public DescribeTopicsResult describeTopics(final TopicCollection topics, DescribeTopicsOptions options) {
    if (topics instanceof TopicIdCollection) {
        return DescribeTopicsResult.ofTopicIds(
            handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options));
    }
    if (topics instanceof TopicNameCollection) {
        return DescribeTopicsResult.ofTopicNames(
            handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(((TopicNameCollection) topics).topicNames(), options));
    }
    throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics.");
}
/**
 * Verifies describeTopics with includeAuthorizedOperations=true surfaces the
 * authorized-operations bit field from the DescribeTopicPartitions response as
 * the corresponding AclOperation set.
 */
@Test
public void testDescribeTopicPartitionsApiWithAuthorizedOps() throws ExecutionException, InterruptedException {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        String topicName0 = "test-0";
        Uuid topicId = Uuid.randomUuid();
        int authorisedOperations = Utils.to32BitField(Utils.mkSet(AclOperation.DESCRIBE.code(), AclOperation.ALTER.code()));
        // First response: cluster description carrying the authorized operations.
        env.kafkaClient().prepareResponse(
            prepareDescribeClusterResponse(0, env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 2, authorisedOperations)
        );

        DescribeTopicPartitionsResponseData responseData = new DescribeTopicPartitionsResponseData();
        responseData.topics().add(new DescribeTopicPartitionsResponseTopic()
            .setErrorCode((short) 0)
            .setTopicId(topicId)
            .setName(topicName0)
            .setIsInternal(false)
            .setTopicAuthorizedOperations(authorisedOperations));
        env.kafkaClient().prepareResponse(new DescribeTopicPartitionsResponse(responseData));

        DescribeTopicsResult result = env.adminClient().describeTopics(
            singletonList(topicName0),
            new DescribeTopicsOptions().includeAuthorizedOperations(true)
        );
        Map<String, TopicDescription> topicDescriptions = result.allTopicNames().get();
        TopicDescription topicDescription = topicDescriptions.get(topicName0);
        assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)),
            topicDescription.authorizedOperations());
    }
}
@PublicAPI(usage = ACCESS)
public boolean contains(String part) {
    // True when the textual form of this location's URI includes the given substring.
    return uri.toString().indexOf(part) >= 0;
}
// For every location form of this test's own class, contains() must match substrings
// that occur in the URI text and reject ones that do not.
@Test @UseDataProvider("locations_of_own_class") public void contains(Location location) { assertThat(location.contains("archunit")).as("location contains 'archunit'").isTrue(); assertThat(location.contains("/archunit/")).as("location contains '/archunit/'").isTrue(); assertThat(location.contains(getClass().getSimpleName())).as("location contains own simple class name").isTrue(); assertThat(location.contains("wrong")).as("location contains 'wrong'").isFalse(); }
/**
 * Parses an attribute-modification spec (e.g. {@code "+key1=value1,-key3"}) into a
 * key -> value map. Add/alter entries ("+key=value") keep their value; delete
 * entries ("-key") map to the empty string. Keys retain their +/- sign.
 *
 * @param attributesModification comma-separated spec; null/empty yields an empty map
 * @return mutable map of parsed entries
 * @throws RuntimeException on malformed entries or duplicated keys
 */
public static Map<String, String> parseToMap(String attributesModification) {
    if (Strings.isNullOrEmpty(attributesModification)) {
        return new HashMap<>();
    }
    // format: +key1=value1,+key2=value2,-key3,+key4=value4
    Map<String, String> attributes = new HashMap<>();
    String[] kvs = attributesModification.split(ATTR_ARRAY_SEPARATOR_COMMA);
    for (String kv : kvs) {
        String key;
        String value;
        if (kv.contains(ATTR_KEY_VALUE_EQUAL_SIGN)) {
            // "key=value" form: must be an add/alter entry carrying the plus sign.
            String[] splits = kv.split(ATTR_KEY_VALUE_EQUAL_SIGN);
            key = splits[0];
            value = splits[1];
            if (!key.contains(ATTR_ADD_PLUS_SIGN)) {
                throw new RuntimeException("add/alter attribute format is wrong: " + key);
            }
        } else {
            // bare "key" form: must be a delete entry carrying the minus sign.
            key = kv;
            value = "";
            if (!key.contains(ATTR_DELETE_MINUS_SIGN)) {
                throw new RuntimeException("delete attribute format is wrong: " + key);
            }
        }
        // NOTE(review): contains() rather than startsWith() means the sign may appear
        // anywhere in the key; the existing tests accept "++=++", so this leniency
        // looks intentional — confirm before tightening.
        String old = attributes.put(key, value);
        if (old != null) {
            throw new RuntimeException("key duplication: " + key);
        }
    }
    return attributes;
}
// Covers the null/empty shortcut, lenient sign placement ("++=++", "--"), and the
// RuntimeException paths for entries missing the required +/- sign.
@Test public void testParseToMap() { Assert.assertEquals(0, AttributeParser.parseToMap(null).size()); AttributeParser.parseToMap("++=++"); AttributeParser.parseToMap("--"); Assert.assertThrows(RuntimeException.class, () -> AttributeParser.parseToMap("x")); Assert.assertThrows(RuntimeException.class, () -> AttributeParser.parseToMap("+")); Assert.assertThrows(RuntimeException.class, () -> AttributeParser.parseToMap("++")); }
/**
 * Commits the DistCp job: optionally concatenates file chunks, runs the
 * delete-missing / atomic-commit / track-missing post-processing selected via
 * configuration, preserves directory attributes when requested, and always
 * cleans up temporary state afterwards.
 *
 * @param jobContext context carrying the job configuration
 * @throws IOException on failure of any commit post-processing step
 */
@Override
public void commitJob(JobContext jobContext) throws IOException {
    Configuration conf = jobContext.getConfiguration();
    // Cache the option flags that drive the commit behaviour below.
    syncFolder = conf.getBoolean(DistCpConstants.CONF_LABEL_SYNC_FOLDERS, false);
    overwrite = conf.getBoolean(DistCpConstants.CONF_LABEL_OVERWRITE, false);
    updateRoot = conf.getBoolean(CONF_LABEL_UPDATE_ROOT, false);
    targetPathExists = conf.getBoolean(
        DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS, true);
    ignoreFailures = conf.getBoolean(
        DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
    if (blocksPerChunk > 0) {
        concatFileChunks(conf);
    }
    super.commitJob(jobContext);
    cleanupTempFiles(jobContext);
    try {
        if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
            deleteMissing(conf);
        } else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
            commitData(conf);
        } else if (conf.get(CONF_LABEL_TRACK_MISSING) != null) {
            // save missing information to a directory
            trackMissing(conf);
        }
        // for HDFS-14621, should preserve status after -delete
        String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
        final boolean preserveRawXattrs = conf.getBoolean(
            DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
        if ((attributes != null && !attributes.isEmpty()) || preserveRawXattrs) {
            preserveFileAttributesForDirectories(conf);
        }
        taskAttemptContext.setStatus("Commit Successful");
    } finally {
        // Always clean up, even if one of the post-processing steps failed.
        cleanup(conf);
    }
}
// Verifies that an atomic-copy commit still preserves directory permissions on the
// final target path (the source permission, not the work directory's initial one).
@Test public void testPreserveStatusWithAtomicCommit() throws IOException { TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config); JobContext jobContext = new JobContextImpl( taskAttemptContext.getConfiguration(), taskAttemptContext.getTaskAttemptID().getJobID()); Configuration conf = jobContext.getConfiguration(); String sourceBase; String workBase; String targetBase; FileSystem fs = null; try { OutputCommitter committer = new CopyCommitter(null, taskAttemptContext); fs = FileSystem.get(conf); FsPermission sourcePerm = new FsPermission((short) 511); FsPermission initialPerm = new FsPermission((short) 448); sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm); workBase = TestDistCpUtils.createTestSetup(fs, initialPerm); targetBase = "/tmp1/" + rand.nextLong(); final DistCpOptions options = new DistCpOptions.Builder( Collections.singletonList(new Path(sourceBase)), new Path("/out")) .preserve(FileAttribute.PERMISSION).build(); options.appendToConf(conf); final DistCpContext context = new DistCpContext(options); context.setTargetPathExists(false); CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS); Path listingFile = new Path("/tmp1/" + rand.nextLong()); listing.buildListing(listingFile, context); conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); conf.set(CONF_LABEL_TARGET_WORK_PATH, workBase); conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true); committer.commitJob(jobContext); checkDirectoryPermissions(fs, targetBase, sourcePerm); } finally { TestDistCpUtils.delete(fs, "/tmp1"); conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS); } }
/**
 * Deterministically generates a string of {@code length} characters drawn from
 * {@code alphabet}. Because a seeded {@link Random} is used, the same
 * (alphabet, length, seed) triple always produces the same string.
 *
 * @param alphabet pool of candidate characters; must be non-empty when length > 0
 * @param length   number of characters to generate (values <= 0 yield "")
 * @param seed     seed for the pseudo-random generator, making output reproducible
 * @return the generated string
 */
@Nonnull
public static String generateName(@Nonnull String alphabet, int length, int seed) {
    Random r = new Random(seed);
    // Presize the builder: the final length is known up front, so no growth copies.
    StringBuilder sb = new StringBuilder(Math.max(length, 0));
    // A bounded for-loop replaces the original length-probing while-loop; each
    // iteration appends exactly one character, so the result is identical.
    for (int i = 0; i < length; i++) {
        sb.append(alphabet.charAt(r.nextInt(alphabet.length())));
    }
    return sb.toString();
}
// Smoke test: the generated name is non-null and has exactly the requested length.
@Test void testGenerateName() { String generated = StringUtil.generateName("abcdefg", 10, 0); assertNotNull(generated); assertEquals(10, generated.length()); }
/**
 * Describes the transaction state of the given transactional ids: builds a
 * per-coordinator admin API future, drives it through DescribeTransactionsHandler,
 * and exposes the combined per-id results.
 */
@Override
public DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, TransactionDescription> future = DescribeTransactionsHandler.newFuture(transactionalIds);
    DescribeTransactionsHandler handler = new DescribeTransactionsHandler(logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new DescribeTransactionsResult(future.all());
}
// Verifies that describeTransactions first locates the coordinator and then maps the
// DescribeTransactions response fields onto the expected TransactionDescription.
@Test public void testDescribeTransactions() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { String transactionalId = "foo"; Node coordinator = env.cluster().nodes().iterator().next(); TransactionDescription expected = new TransactionDescription( coordinator.id(), TransactionState.COMPLETE_COMMIT, 12345L, 15, 10000L, OptionalLong.empty(), emptySet()); env.kafkaClient().prepareResponse( request -> request instanceof FindCoordinatorRequest, prepareFindCoordinatorResponse(Errors.NONE, transactionalId, coordinator) ); env.kafkaClient().prepareResponseFrom( request -> request instanceof DescribeTransactionsRequest, new DescribeTransactionsResponse(new DescribeTransactionsResponseData().setTransactionStates( singletonList(new DescribeTransactionsResponseData.TransactionState() .setErrorCode(Errors.NONE.code()) .setProducerEpoch((short) expected.producerEpoch()) .setProducerId(expected.producerId()) .setTransactionalId(transactionalId) .setTransactionTimeoutMs(10000) .setTransactionStartTimeMs(-1) .setTransactionState(expected.state().toString()) ) )), coordinator ); DescribeTransactionsResult result = env.adminClient().describeTransactions(singleton(transactionalId)); KafkaFuture<TransactionDescription> future = result.description(transactionalId); assertEquals(expected, future.get()); } }
/**
 * Parses boss kill-count, personal-best, Duel Arena, Hallowed Sepulchre, hunter
 * rumour, collection-log and Guardians of the Rift chat messages and persists the
 * extracted counters via setKc/setPb. Only trade, game, spam and
 * friends-chat-notification messages are considered.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage) {
    // Only message types that can carry the tracked notifications.
    if (chatMessage.getType() != ChatMessageType.TRADE
        && chatMessage.getType() != ChatMessageType.GAMEMESSAGE
        && chatMessage.getType() != ChatMessageType.SPAM
        && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) {
        return;
    }
    String message = chatMessage.getMessage();
    Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
    if (matcher.find()) {
        final String boss = matcher.group("boss");
        final int kc = Integer.parseInt(matcher.group("kc"));
        final String pre = matcher.group("pre");
        final String post = matcher.group("post");
        // No pre/post text means this is an unset message for the boss.
        if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) {
            unsetKc(boss);
            return;
        }
        String renamedBoss = KILLCOUNT_RENAMES
            .getOrDefault(boss, boss)
            // The config service doesn't support keys with colons in them
            .replace(":", "");
        // NOTE(review): identity (!=) comparison appears deliberate — it relies on
        // getOrDefault/replace returning the same instance when nothing changed.
        // Confirm before "fixing" this to equals().
        if (boss != renamedBoss) {
            // Unset old TOB kc
            unsetKc(boss);
            unsetPb(boss);
            unsetKc(boss.replace(":", "."));
            unsetPb(boss.replace(":", "."));
            // Unset old story mode
            unsetKc("Theatre of Blood Story Mode");
            unsetPb("Theatre of Blood Story Mode");
        }
        setKc(renamedBoss, kc);
        // We either already have the pb, or need to remember the boss for the upcoming pb
        if (lastPb > -1) {
            log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
            if (renamedBoss.contains("Theatre of Blood")) {
                // TOB team size isn't sent in the kill message, but can be computed from varbits
                int tobTeamSize = tobTeamSize();
                lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
            } else if (renamedBoss.contains("Tombs of Amascut")) {
                // TOA team size isn't sent in the kill message, but can be computed from varbits
                int toaTeamSize = toaTeamSize();
                lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
            }
            final double pb = getPb(renamedBoss);
            // If a raid with a team size, only update the pb if it is lower than the existing pb
            // so that the pb is the overall lowest of any team size
            if (lastTeamSize == null || pb == 0 || lastPb < pb) {
                log.debug("Setting overall pb (old: {})", pb);
                setPb(renamedBoss, lastPb);
            }
            if (lastTeamSize != null) {
                log.debug("Setting team size pb: {}", lastTeamSize);
                setPb(renamedBoss + " " + lastTeamSize, lastPb);
            }
            lastPb = -1;
            lastTeamSize = null;
        } else {
            lastBossKill = renamedBoss;
            lastBossTime = client.getTickCount();
        }
        return;
    }
    matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
    if (matcher.find()) {
        final int oldWins = getKc("Duel Arena Wins");
        final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", ""));
        final String result = matcher.group(1);
        int winningStreak = getKc("Duel Arena Win Streak");
        int losingStreak = getKc("Duel Arena Lose Streak");
        if (result.equals("won") && wins > oldWins) {
            losingStreak = 0;
            winningStreak += 1;
        } else if (result.equals("were defeated")) {
            losingStreak += 1;
            winningStreak = 0;
        } else {
            log.warn("unrecognized duel streak chat message: {}", message);
        }
        setKc("Duel Arena Wins", wins);
        setKc("Duel Arena Win Streak", winningStreak);
        setKc("Duel Arena Lose Streak", losingStreak);
    }
    matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
    if (matcher.find()) {
        int losses = matcher.group(1).equals("one") ? 1 : Integer.parseInt(matcher.group(1).replace(",", ""));
        setKc("Duel Arena Losses", losses);
    }
    // Personal-best duration messages come in several formats; all feed matchPb.
    matcher = KILL_DURATION_PATTERN.matcher(message);
    if (matcher.find()) {
        matchPb(matcher);
    }
    matcher = NEW_PB_PATTERN.matcher(message);
    if (matcher.find()) {
        matchPb(matcher);
    }
    matcher = RAIDS_PB_PATTERN.matcher(message);
    if (matcher.find()) {
        matchPb(matcher);
    }
    matcher = RAIDS_DURATION_PATTERN.matcher(message);
    if (matcher.find()) {
        matchPb(matcher);
    }
    matcher = HS_PB_PATTERN.matcher(message);
    if (matcher.find()) {
        int floor = Integer.parseInt(matcher.group("floor"));
        String floortime = matcher.group("floortime");
        String floorpb = matcher.group("floorpb");
        String otime = matcher.group("otime");
        String opb = matcher.group("opb");
        // Prefer the explicit pb group; fall back to the plain floor time.
        String pb = MoreObjects.firstNonNull(floorpb, floortime);
        setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
        if (otime != null) {
            pb = MoreObjects.firstNonNull(opb, otime);
            setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
        }
    }
    matcher = HS_KC_FLOOR_PATTERN.matcher(message);
    if (matcher.find()) {
        int floor = Integer.parseInt(matcher.group(1));
        int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
        setKc("Hallowed Sepulchre Floor " + floor, kc);
    }
    matcher = HS_KC_GHC_PATTERN.matcher(message);
    if (matcher.find()) {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hallowed Sepulchre", kc);
    }
    matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
    if (matcher.find()) {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hunter Rumours", kc);
    }
    // A boss kill remembered on an earlier tick is stale by now.
    if (lastBossKill != null && lastBossTime != client.getTickCount()) {
        lastBossKill = null;
        lastBossTime = -1;
    }
    matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
    if (matcher.find()) {
        String item = matcher.group(1);
        int petId = findPet(item);
        if (petId != -1) {
            final List<Integer> petList = new ArrayList<>(getPetList());
            if (!petList.contains(petId)) {
                log.debug("New pet added: {}/{}", item, petId);
                petList.add(petId);
                setPetList(petList);
            }
        }
    }
    matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
    if (matcher.find()) {
        int kc = Integer.parseInt(matcher.group(1));
        setKc("Guardians of the Rift", kc);
    }
}
// An agility lap-count message followed by a "new personal best" duration message
// must store both the kill count and the pb (including the precise "1:01.20" format)
// under the lower-cased course name.
@Test
public void testShayzienAdvancedAgilityLap() {
    // This sets lastBoss
    ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Shayzien Advanced Agility Course lap count is: <col=ff0000>2</col>.", null, 0);
    chatCommandsPlugin.onChatMessage(chatMessage);
    chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Lap duration: <col=ff0000>1:01</col> (new personal best).", null, 0);
    chatCommandsPlugin.onChatMessage(chatMessage);
    verify(configManager).setRSProfileConfiguration("personalbest", "shayzien advanced agility course", 61.0);
    verify(configManager).setRSProfileConfiguration("killcount", "shayzien advanced agility course", 2);
    // Precise times
    chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Lap duration: <col=ff0000>1:01.20</col> (new personal best).", null, 0);
    chatCommandsPlugin.onChatMessage(chatMessage);
    verify(configManager).setRSProfileConfiguration("personalbest", "shayzien advanced agility course", 61.2);
}
/**
 * Loads the Flink configuration named by the command-line arguments and applies any
 * dynamic-property modifications, returning the resulting configuration as text lines.
 * Arguments not recognised by the modifiable-cluster-configuration options are
 * filtered out before parsing.
 */
public static List<String> loadAndModifyConfiguration(String[] args) throws FlinkException {
    return ConfigurationParserUtils.loadAndModifyConfiguration(
        filterCmdArgs(args, ModifiableClusterConfigurationParserFactory.options()),
        BashJavaUtils.class.getSimpleName());
}
// A -Dkey=value dynamic property (no space) must appear in the emitted configuration
// lines alongside the value loaded from --configDir, for both YAML layouts.
@TestTemplate void testloadAndModifyConfigurationDynamicPropertyWithoutSpace() throws Exception { String[] args = {"--configDir", confDir.toFile().getAbsolutePath(), "-Dkey=value"}; List<String> list = FlinkConfigLoader.loadAndModifyConfiguration(args); if (standardYaml) { assertThat(list).containsExactly("test:", " key: " + TEST_CONFIG_VALUE, "key: value"); } else { assertThat(list) .containsExactlyInAnyOrder( TEST_CONFIG_KEY + ": " + TEST_CONFIG_VALUE, "key: value"); } }
/**
 * On startup, reconciles the plugin-risk-consent property with the installed plugins:
 * when external plugins exist but consent is NOT_ACCEPTED, a warning is written to
 * sonar.log and the property is escalated to REQUIRED; when no external plugins
 * remain but consent is still REQUIRED, the property is deleted.
 */
@Override
public void start() {
    boolean hasExternalPlugins = pluginRepository.getPlugins().stream().anyMatch(plugin -> plugin.getType().equals(PluginType.EXTERNAL));
    try (DbSession session = dbClient.openSession(false)) {
        // Fall back to the default consent property when none is stored yet.
        PropertyDto property = Optional.ofNullable(dbClient.propertiesDao().selectGlobalProperty(session, PLUGINS_RISK_CONSENT))
            .orElse(defaultPluginRiskConsentProperty());
        if (hasExternalPlugins && NOT_ACCEPTED == PluginRiskConsent.valueOf(property.getValue())) {
            addWarningInSonarDotLog();
            property.setValue(REQUIRED.name());
            dbClient.propertiesDao().saveProperty(session, property);
            session.commit();
        } else if (!hasExternalPlugins && REQUIRED == PluginRiskConsent.valueOf(property.getValue())) {
            dbClient.propertiesDao().deleteGlobalProperty(PLUGINS_RISK_CONSENT, session);
            session.commit();
        }
    }
}
// With an external plugin present and consent NOT_ACCEPTED, start() must escalate the
// stored consent property to REQUIRED.
@Test public void require_consent_when_exist_external_plugins_and_not_accepted() { setupExternalPluginConsent(NOT_ACCEPTED); setupExternalPlugin(); underTest.start(); assertThat(dbClient.propertiesDao().selectGlobalProperty(PLUGINS_RISK_CONSENT)) .extracting(PropertyDto::getValue) .isEqualTo(REQUIRED.name()); }
/**
 * Opens a normal (in-process) span for the given request.
 *
 * @param tracingRequest the request describing the span to open
 * @return the configured span event, or empty when tracing is disabled or no
 *         span context is available
 */
@Override
public Optional<SpanEvent> onNormalSpanStart(TracingRequest tracingRequest) {
    // Tracing disabled: report no span without touching the span context.
    if (!isTracing) {
        return Optional.empty();
    }
    // configureSpanEvent already yields the fully initialised span (or empty when no
    // span context exists), so return it directly instead of unwrapping with get()
    // and re-wrapping with Optional.of() as the previous implementation did.
    return configureSpanEvent(tracingRequest);
}
// Exercises onNormalSpanStart across its states: stopped service, missing span
// context, and two consecutive child spans sharing the provider span as parent.
@Test
public void onNormalSpanStart() {
    try (MockedStatic<ServiceManager> mockedStatic = Mockito.mockStatic(ServiceManager.class)) {
        mockedStatic.when(() -> ServiceManager.getService(GatewayClient.class))
                .thenReturn(new NettyGatewayClient());
        TracingServiceImpl tracingService = new TracingServiceImpl();
        // Service is stopped.
        tracingService.stop();
        TracingRequest tracingRequest = new TracingRequest("", "", "", "", "");
        Assert.assertFalse(tracingService.onNormalSpanStart(tracingRequest).isPresent());
        // SpanEventContext is null.
        tracingService.start();
        Assert.assertFalse(tracingService.onNormalSpanStart(tracingRequest).isPresent());
        // The normal condition.
        String traceId = TracingUtils.generateTraceId();
        header.put(TRACE_ID, traceId);
        header.put(PARENT_SPAN_ID, INIT_PARENT_SPAN_ID);
        header.put(SPAN_ID_PREFIX, INIT_SPAN_ID_PREFIX);
        tracingService.onProviderSpanStart(tracingRequest, extractService, header);
        tracingService.onSpanFinally();
        Optional<SpanEvent> spanEventOptional = tracingService.onNormalSpanStart(tracingRequest);
        Assert.assertTrue(spanEventOptional.isPresent());
        Assert.assertEquals(traceId, spanEventOptional.get().getTraceId());
        Assert.assertEquals("0-0-1", spanEventOptional.get().getSpanId());
        Assert.assertEquals(INIT_SPAN_ID, spanEventOptional.get().getParentSpanId());
        tracingService.onSpanFinally();
        // The second children span.
        spanEventOptional = tracingService.onNormalSpanStart(tracingRequest);
        Assert.assertTrue(spanEventOptional.isPresent());
        Assert.assertEquals(traceId, spanEventOptional.get().getTraceId());
        Assert.assertEquals("0-0-2", spanEventOptional.get().getSpanId());
        Assert.assertEquals(INIT_SPAN_ID, spanEventOptional.get().getParentSpanId());
        tracingService.onSpanFinally();
        tracingService.stop();
    }
}
@Override
public boolean test(Pair<Point, Point> pair) {
    /*
     * Accept the pair only when the time gap is small AND the distance check passes.
     * A large time delta is always rejected because deriving a result from it would
     * rely on a numerically unstable process. Short-circuiting keeps the evaluation
     * order identical to the original: the distance check only runs for small gaps.
     */
    boolean smallTimeGap = timeDeltaIsSmall(pair.first().time(), pair.second().time());
    return smallTimeGap && distIsSmall(pair);
}
// A pair whose time gap exceeds MAX_TIME_DELTA_IN_MILLISEC must be rejected even
// though the points are far apart, regardless of the order of the pair.
@Test
public void testCase1() {
    DistanceFilter filter = newTestFilter();
    LatLong position1 = new LatLong(0.0, 0.0);
    double tooFarInNm = MAX_DISTANCE_IN_FEET * 3.0 / Spherical.feetPerNM();
    Point p1 = new PointBuilder()
        .latLong(position1)
        .time(Instant.EPOCH)
        .altitude(Distance.ofFeet(500.0))
        .build();
    Point p2 = new PointBuilder()
        .latLong(position1.projectOut(90.0, tooFarInNm)) //move the position
        .time(Instant.EPOCH.plusMillis(MAX_TIME_DELTA_IN_MILLISEC * 2))
        .altitude(Distance.ofFeet(500.0))
        .build();
    assertFalse(filter.test(Pair.of(p1, p2)));
    assertFalse(filter.test(Pair.of(p2, p1)));
}
/**
 * Runs every rule's pattern against the log and collects a Result for each rule
 * whose pattern matches at least once.
 *
 * NOTE: the misspelling "anaylze" is part of the public API and is kept as-is.
 */
public static Set<Result> anaylze(String log) {
    Set<Result> matched = new HashSet<>();
    for (Rule candidate : Rule.values()) {
        Matcher probe = candidate.pattern.matcher(log);
        if (!probe.find()) {
            continue;
        }
        matched.add(new Result(candidate, log, probe));
    }
    return matched;
}
// Smoke test: analyzing the mod_name.txt crash log must yield a MOD_NAME result
// (findResultByRule is expected to locate it; the value itself is not asserted here).
@Test public void modName() throws IOException { CrashReportAnalyzer.Result result = findResultByRule( CrashReportAnalyzer.anaylze(loadLog("/logs/mod_name.txt")), CrashReportAnalyzer.Rule.MOD_NAME); }
/**
 * Converts a data table (first row = header of keys, remaining rows = values) into a
 * list of maps, one map per value row. Both key and value cells are converted with
 * registered table cell transformers; missing transformers for either type are
 * reported together in a single exception.
 *
 * @throws CucumberDataTableException when no cell transformer is registered for the
 *         key and/or value type
 */
@Override
@SuppressWarnings("unchecked")
public <K, V> List<Map<K, V>> toMaps(DataTable dataTable, Type keyType, Type valueType) {
    requireNonNull(dataTable, "dataTable may not be null");
    requireNonNull(keyType, "keyType may not be null");
    requireNonNull(valueType, "valueType may not be null");
    if (dataTable.isEmpty()) {
        return emptyList();
    }
    DataTableType keyConverter = registry.lookupCellTypeByType(keyType);
    DataTableType valueConverter = registry.lookupCellTypeByType(valueType);
    // Collect all converter problems first so the user sees every missing
    // transformer at once instead of one at a time.
    List<String> problems = new ArrayList<>();
    if (keyConverter == null) {
        problems.add(problemNoTableCellTransformer(keyType));
    }
    if (valueConverter == null) {
        problems.add(problemNoTableCellTransformer(valueType));
    }
    if (!problems.isEmpty()) {
        throw mapsNoConverterDefined(keyType, valueType, problems);
    }
    // The first row is the header; its converted cells become the map keys.
    DataTable header = dataTable.rows(0, 1);
    List<Map<K, V>> result = new ArrayList<>();
    List<K> keys = unpack((List<List<K>>) keyConverter.transform(header.cells()));
    // The remaining rows carry the values; a header-only table yields no maps.
    DataTable rows = dataTable.rows(1);
    if (rows.isEmpty()) {
        return emptyList();
    }
    List<List<V>> transform = (List<List<V>>) valueConverter.transform(rows.cells());
    for (List<V> values : transform) {
        result.add(createMap(keyType, keys, valueType, values));
    }
    return unmodifiableList(result);
}
// Converting to maps with an unregistered value cell type must raise a
// CucumberDataTableException whose message names the missing transformer.
@Test void to_maps_of_unknown_key_type__throws_exception__register_table_cell_transformer() { DataTable table = parse("", "| lat | lon |", "| 29.993333 | -90.258056 |", "| 37.618889 | -122.375 |", "| 47.448889 | -122.309444 |", "| 40.639722 | -73.778889 |"); CucumberDataTableException exception = assertThrows( CucumberDataTableException.class, () -> converter.toMaps(table, String.class, Coordinate.class)); assertThat(exception.getMessage(), is("" + "Can't convert DataTable to List<Map<java.lang.String, io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Coordinate>>.\n" + "Please review these problems:\n" + "\n" + " - There was no table cell transformer registered for io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Coordinate.\n" + " Please consider registering a table cell transformer.\n" + "\n" + "Note: Usually solving one is enough")); }
/**
 * Validates that every department id in {@code ids} exists and is enabled.
 * A null/empty collection is a no-op; otherwise a service exception is thrown
 * (DEPT_NOT_FOUND / DEPT_NOT_ENABLE) for the first offending id.
 */
@Override
public void validateDeptList(Collection<Long> ids) {
    if (CollUtil.isEmpty(ids)) {
        return;
    }
    // Load the department records for the given ids
    Map<Long, DeptDO> deptMap = getDeptMap(ids);
    // Validate: each id must resolve to an existing, enabled department
    ids.forEach(id -> {
        DeptDO dept = deptMap.get(id);
        if (dept == null) {
            throw exception(DEPT_NOT_FOUND);
        }
        if (!CommonStatusEnum.ENABLE.getStatus().equals(dept.getStatus())) {
            throw exception(DEPT_NOT_ENABLE, dept.getName());
        }
    });
}
// Validating a disabled department must raise DEPT_NOT_ENABLE carrying its name.
@Test
public void testValidateDeptList_notEnable() {
    // mock data: insert a department whose status is DISABLE
    DeptDO deptDO = randomPojo(DeptDO.class).setStatus(CommonStatusEnum.DISABLE.getStatus());
    deptMapper.insert(deptDO);
    // prepare parameters
    List<Long> ids = singletonList(deptDO.getId());
    // invoke and assert the expected service exception
    assertServiceException(() -> deptService.validateDeptList(ids), DEPT_NOT_ENABLE, deptDO.getName());
}
/**
 * Builder entry point: captures the raw property value (e.g. "http") that will
 * later be resolved to a client type by build().
 */
public static Build withPropertyValue(String propertyValue) {
    return new Builder(propertyValue);
}
// The property value must resolve to the HTTP client type regardless of letter case.
@Test
void it_should_return_client_type_and_ignore_case_when_property_value_exists() {
    //GIVEN
    String clientType = "hTtP";
    //WHEN
    ElasticsearchClientType esClientType = ElasticsearchClientTypeBuilder.withPropertyValue(clientType).build();
    //THEN
    assertEquals(HTTP, esClientType);
}
/**
 * Returns a snapshot of the currently tracked (live) keys, in the map's iteration
 * order. A fresh list is returned so callers cannot mutate the underlying key set.
 */
List<String> liveKeysAsOrderedList() {
    // Diamond operator replaces the redundant explicit type argument.
    return new ArrayList<>(liveMap.keySet());
}
// Removing stale components from an empty tracker must leave it empty.
@Test public void empty0() { long now = 3000; tracker.removeStaleComponents(now); assertEquals(0, tracker.liveKeysAsOrderedList().size()); assertEquals(0, tracker.getComponentCount()); }
/**
 * Sends the request over this connection and blocks until a response arrives or the
 * timeout elapses. The pending future is always removed from the ack synchronizer,
 * whether the wait succeeds or fails.
 *
 * @throws NacosException wrapping any failure while waiting for the response
 */
@Override
public Response request(Request request, long timeoutMills) throws NacosException {
    DefaultRequestFuture pushFuture = sendRequestInner(request, null);
    try {
        return pushFuture.get(timeoutMills);
    } catch (Exception e) {
        throw new NacosException(NacosException.SERVER_ERROR, e);
    } finally {
        // Always unregister the future so the synchronizer does not leak entries.
        RpcAckCallbackSynchronizer.clearFuture(getMetaInfo().getConnectionId(), pushFuture.getRequestId());
    }
}
// When the underlying stream observer throws IllegalStateException on send, request()
// must surface a ConnectionAlreadyClosedException whose cause is that exception.
@Test void testIllegalStateException() { Mockito.doReturn(new DefaultEventLoop()).when(channel).eventLoop(); Mockito.doThrow(new IllegalStateException()).when(streamObserver).onNext(Mockito.any()); Mockito.doReturn(true).when(streamObserver).isReady(); try { connection.request(new NotifySubscriberRequest(), 1000L); assertTrue(false); } catch (Exception e) { assertTrue(e instanceof ConnectionAlreadyClosedException); assertTrue(e.getCause() instanceof IllegalStateException); } }
/**
 * Returns the supported homepage types; fails fast with an IllegalStateException
 * if called before the types list has been initialized.
 */
@Override
public List<Type> getTypes() {
    checkState(types != null, "Homepage types have not been initialized yet");
    return types;
}
// The initialized homepage types must contain exactly the expected set, order-independent.
@Test public void types() { assertThat(underTest.getTypes()).containsExactlyInAnyOrder(PROJECT, PROJECTS, ISSUES, PORTFOLIOS, PORTFOLIO, APPLICATION); }
/**
 * Builds the SelectKey (re-partition) step for the stream, delegating to the overload
 * that takes a params factory, using PartitionByParamsFactory::build as the default.
 */
public static <K> KStreamHolder<K> build(
    final KStreamHolder<K> stream,
    final StreamSelectKey<K> selectKey,
    final RuntimeBuildContext buildContext
) {
    return build(stream, selectKey, buildContext, PartitionByParamsFactory::build);
}
// The SelectKey step must pass its generated step name ("ya-SelectKey") into the
// underlying KStream.map call.
@Test
public void shouldUseCorrectNameInMapCall() {
    // When:
    StreamSelectKeyBuilder
        .build(stream, selectKey, buildContext, paramBuilder);
    // Then:
    verify(kstream).map(any(), nameCaptor.capture());
    assertThat(NamedTestAccessor.getName(nameCaptor.getValue()), is("ya-SelectKey"));
}
/**
 * Adds the given run items to this page filter; thin public facade over addInternal.
 */
public void add(@NonNull Iterable<T> runItems) {
    addInternal(runItems);
}
// Adding an empty item list must leave the page filter empty with no up/down paging.
@Test public void test_latest_empty_page() { HistoryPageFilter<ModelObject> historyPageFilter = newPage(5, null, null); Iterable<ModelObject> itemList = Collections.emptyList(); historyPageFilter.add(itemList); Assert.assertFalse(historyPageFilter.hasUpPage); Assert.assertFalse(historyPageFilter.hasDownPage); Assert.assertTrue(historyPageFilter.queueItems.isEmpty()); Assert.assertTrue(historyPageFilter.runs.isEmpty()); }
/**
 * Resolves a class by name using the default class loader; supports the same name
 * forms as the two-argument overload (see the test: primitives such as "byte",
 * "java.lang.String[]", and JVM-internal array names like "[Ljava.lang.String;").
 */
public static Class<?> forName(String name) throws ClassNotFoundException {
    return forName(name, getClassLoader());
}
// forName must resolve primitive names, Java-style array names, and JVM-internal
// array descriptors to the same Class objects.
@Test void testForName2() throws Exception { assertThat(ClassUtils.forName("byte") == byte.class, is(true)); assertThat(ClassUtils.forName("java.lang.String[]") == String[].class, is(true)); assertThat(ClassUtils.forName("[Ljava.lang.String;") == String[].class, is(true)); }
/**
 * Returns the process exit status byte computed by the exit-status strategy field.
 */
public byte exitStatus() {
    return exitStatus.exitStatus();
}
// An UNDEFINED test case outcome must map to the non-zero exit status 0x1.
@Test void with_undefined_scenarios() { Runtime runtime = createRuntime(); bus.send(testCaseFinishedWithStatus(Status.UNDEFINED)); assertThat(runtime.exitStatus(), is(equalTo((byte) 0x1))); }
/**
 * Sanitizes raw javadoc text into a plain-text description: strips leading '*'
 * markers, stops at the first param/return/deprecated tag, skips other tag lines,
 * removes XML tags and inline link markup, filters characters down to Java
 * identifier parts plus VALID_CHARS, collapses whitespace, and unescapes http(s)
 * links. When {@code summary} is true, only the first sentence (up to an empty
 * line or a trailing dot) is kept.
 *
 * @param javadoc the raw javadoc text; may be null or empty
 * @param summary whether to keep only the summary sentence
 * @return the sanitized description, or null for null/empty input
 */
public static String sanitizeDescription(String javadoc, boolean summary) {
    if (isNullOrEmpty(javadoc)) {
        return null;
    }
    // lets just use what java accepts as identifiers
    StringBuilder sb = new StringBuilder();
    // split into lines
    String[] lines = javadoc.split("\n");
    boolean first = true;
    for (String line : lines) {
        line = line.trim();
        if (line.startsWith("**")) {
            continue;
        }
        // remove leading javadoc *
        if (line.startsWith("*")) {
            line = line.substring(1);
            line = line.trim();
        }
        // terminate if we reach @param, @return or @deprecated as we only want the javadoc summary
        if (line.startsWith("@param") || line.startsWith("@return") || line.startsWith("@deprecated")) {
            break;
        }
        // skip lines that are javadoc references
        if (line.startsWith("@")) {
            continue;
        }
        // we are starting from a new line, so add a whitespace
        if (!first) {
            sb.append(' ');
        }
        // append data
        String s = line.trim();
        sb.append(s);
        boolean empty = isNullOrEmpty(s);
        boolean endWithDot = s.endsWith(".");
        boolean haveText = !sb.isEmpty();
        if (haveText && summary && (empty || endWithDot)) {
            // if we only want a summary, then skip at first empty line we encounter, or if the sentence ends with a dot
            break;
        }
        first = false;
    }
    String s = sb.toString();
    // remove all XML tags
    s = s.replaceAll("<.*?>", "");
    // remove {@link inlined javadoc links which is special handled
    s = s.replaceAll("\\{@link\\s\\w+\\s(\\w+)}", "$1");
    s = s.replaceAll("\\{@link\\s([\\w]+)}", "$1");
    // also remove the commonly mistake to do with @{link
    s = s.replaceAll("@\\{link\\s\\w+\\s(\\w+)}", "$1");
    s = s.replaceAll("@\\{link\\s([\\w]+)}", "$1");
    // remove all inlined javadoc links, eg such as {@link org.apache.camel.spi.Registry}
    // use #? to remove leading # in case its a local reference
    s = s.replaceAll("\\{@\\w+\\s#?([\\w.#(\\d,)]+)}", "$1");
    // create a new line
    StringBuilder cb = new StringBuilder();
    for (char c : s.toCharArray()) {
        if (Character.isJavaIdentifierPart(c) || VALID_CHARS.indexOf(c) != -1) {
            cb.append(c);
        } else if (Character.isWhitespace(c)) {
            // always use space as whitespace, also for line feeds etc
            cb.append(' ');
        }
    }
    s = cb.toString();
    // remove double whitespaces, and trim
    s = s.replaceAll("\\s+", " ");
    // unescape http links
    s = s.replaceAll("\\\\(http:|https:)", "$1");
    return s.trim();
}
// Sanitizing must strip the leading '*' and inline {@link ...} markup while keeping
// the local '#' reference text, plus match the larger fixture expectations.
@Test public void testSanitizeJavaDoc() throws Exception { String s = "* more memory. The total size is provided in the {@link org.apache.camel.Exchange#SPLIT_SIZE} header."; String s2 = JavadocHelper.sanitizeDescription(s, false); Assertions.assertEquals("more memory. The total size is provided in the org.apache.camel.Exchange#SPLIT_SIZE header.", s2); String out = JavadocHelper.sanitizeDescription(JAVADOC, false); Assertions.assertEquals(EXPECTED_OUT, out); String out2 = JavadocHelper.sanitizeDescription(JAVADOC2, false); Assertions.assertEquals(EXPECTED_OUT2, out2); }
/**
 * Opens a streaming download for the file. For resumed (append) transfers a Range
 * header is added and compression disabled; on 404 the cached file id is
 * invalidated before the status is mapped to an exception.
 *
 * @return the response body stream, releasing the connection when closed
 * @throws BackgroundException on an HTTP error status or I/O failure
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final StoregateApiClient client = session.getClient();
        final HttpUriRequest request = new HttpGet(String.format("%s/v4.2/download/files/%s?stream=true", client.getBasePath(), fileid.getFileId(file)));
        if(status.isAppend()) {
            // Resume: request only the remaining byte range.
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
            // Disable compression
            request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
        }
        final HttpResponse response = client.getClient().execute(request);
        switch(response.getStatusLine().getStatusCode()) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_PARTIAL_CONTENT:
                return new HttpMethodReleaseInputStream(response);
            case HttpStatus.SC_NOT_FOUND:
                // Drop the stale cached file id, then fall through to error mapping.
                fileid.cache(file, null);
                // Break through
            default:
                throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
                    response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
        }
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Uploads 1023 random bytes, then reads with an append/offset status of 100 and
// asserts the returned stream yields exactly the bytes from offset 100 onward.
@Test public void testReadRange() throws Exception { final StoregateIdProvider nodeid = new StoregateIdProvider(session); final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir( new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); new StoregateTouchFeature(session, nodeid).touch(test, new TransferStatus()); final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random()); final byte[] content = RandomUtils.nextBytes(1023); final OutputStream out = local.getOutputStream(false); assertNotNull(out); IOUtils.write(content, out); out.close(); final TransferStatus upload = new TransferStatus().withLength(content.length); upload.setExists(true); new DefaultUploadFeature<>(new StoregateWriteFeature(session, nodeid)).upload( test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), upload, new DisabledConnectionCallback()); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setAppend(true); status.setOffset(100L); final InputStream in = new StoregateReadFeature(session, nodeid).read(test, status.withLength(content.length - 100), new DisabledConnectionCallback()); assertNotNull(in); final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100); new StreamCopier(status, status).transfer(in, buffer); final byte[] reference = new byte[content.length - 100]; System.arraycopy(content, 100, reference, 0, content.length - 100); assertArrayEquals(reference, buffer.toByteArray()); in.close(); new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override
public BackgroundException map(final String message, final ApiException failure, final Path file) {
    // A 404 means the cached file id for this path is stale; drop it before
    // delegating the actual exception translation to the superclass.
    if(failure.getCode() == HttpStatus.SC_NOT_FOUND) {
        fileid.cache(file, null);
    }
    return super.map(message, failure, file);
}
// A nested SSL/socket timeout wrapped in an ApiException must be mapped to a
// ConnectionRefusedException.
@Test public void testDisconnect() { final StoregateExceptionMappingService service = new StoregateExceptionMappingService(new StoregateIdProvider( new StoregateSession(new Host(new StoregateProtocol()), new DisabledX509TrustManager(), new DefaultX509KeyManager()) )); assertEquals(ConnectionRefusedException.class, service.map( new ApiException(new ProcessingException(new SSLException(new SocketException("Operation timed out (Read failed)"))))).getClass()); }
/**
 * Prints the run summary: non-passing scenarios first, then scenario and step
 * counts (or zero placeholders when nothing ran), and finally the duration.
 */
void printStats(PrintStream out) {
    printNonZeroResultScenarios(out);
    final boolean anyStepsRan = stepSubCounts.getTotal() != 0;
    if (anyStepsRan) {
        printScenarioCounts(out);
        printStepCounts(out);
    } else {
        // Nothing executed: emit fixed zero-count lines.
        out.println("0 Scenarios");
        out.println("0 Steps");
    }
    printDuration(out);
}
@Test
void should_print_sub_counts_in_order_failed_ambiguous_skipped_undefined_passed_in_color() {
    // Runs one single-step scenario per status and checks that both summary
    // lines list the sub-counts in the fixed order failed, ambiguous, skipped,
    // pending, undefined, passed -- each wrapped in its ANSI color code.
    Stats counter = createColorSummaryCounter();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    addOneStepScenario(counter, Status.PASSED);
    addOneStepScenario(counter, Status.FAILED);
    addOneStepScenario(counter, Status.AMBIGUOUS);
    addOneStepScenario(counter, Status.PENDING);
    addOneStepScenario(counter, Status.UNDEFINED);
    addOneStepScenario(counter, Status.SKIPPED);
    counter.printStats(new PrintStream(baos));
    // Expected fragment: each count colored (red/red/cyan/yellow/yellow/green)
    // and reset again, independent of the order the scenarios were added in.
    String colorSubCounts = ""
        + AnsiEscapes.RED + "1 failed" + AnsiEscapes.RESET + ", "
        + AnsiEscapes.RED + "1 ambiguous" + AnsiEscapes.RESET + ", "
        + AnsiEscapes.CYAN + "1 skipped" + AnsiEscapes.RESET + ", "
        + AnsiEscapes.YELLOW + "1 pending" + AnsiEscapes.RESET + ", "
        + AnsiEscapes.YELLOW + "1 undefined" + AnsiEscapes.RESET + ", "
        + AnsiEscapes.GREEN + "1 passed" + AnsiEscapes.RESET;
    assertThat(baos.toString(), containsString(String.format(""
        + "6 Scenarios (" + colorSubCounts + ")%n"
        + "6 Steps (" + colorSubCounts + ")%n")));
}
public static String[] splitString( String string, String separator ) { /* * 0123456 Example a;b;c;d --> new String[] { a, b, c, d } */ // System.out.println("splitString ["+path+"] using ["+separator+"]"); List<String> list = new ArrayList<>(); if ( string == null || string.length() == 0 ) { return new String[] {}; } int sepLen = separator.length(); int from = 0; int end = string.length() - sepLen + 1; for ( int i = from; i < end; i += sepLen ) { if ( string.substring( i, i + sepLen ).equalsIgnoreCase( separator ) ) { // OK, we found a separator, the string to add to the list // is [from, i[ list.add( nullToEmpty( string.substring( from, i ) ) ); from = i + sepLen; } } // Wait, if the string didn't end with a separator, we still have information at the end of the string... // In our example that would be "d"... if ( from + sepLen <= string.length() ) { list.add( nullToEmpty( string.substring( from, string.length() ) ) ); } return list.toArray( new String[list.size()] ); }
@Test public void testSplitStringWithDifferentDelimiterAndEnclosure() { // Try a different delimiter and enclosure String[] result = Const.splitString( "a;'b;c;d';'e,f';'g';h", ";", "'" ); assertNotNull( result ); assertEquals( 5, result.length ); assertEquals( "a", result[0] ); assertEquals( "'b;c;d'", result[1] ); assertEquals( "'e,f'", result[2] ); assertEquals( "'g'", result[3] ); assertEquals( "h", result[4] ); // Check for null and empty as the last split result = Const.splitString( "a;b;c;", ";", null ); assertNotNull( result ); assertEquals( 3, result.length ); result = Const.splitString( "a;b;c;''", ";", "'" ); assertNotNull( result ); assertEquals( 4, result.length ); }
/**
 * Deregisters the calling party from the request-tracking phaser and returns
 * the future that completes once all in-flight requests have terminated.
 * If no requests are outstanding, the returned future may already be done.
 */
public CompletableFuture<Void> awaitAsync() {
    // Arriving and deregistering lets the phaser advance (and the termination
    // future complete) as soon as the remaining in-flight requests finish.
    phaser.arriveAndDeregister();
    return terminationFuture;
}
@Test
void testShouldFinishAwaitAsyncImmediatelyIfNoRequests() {
    // With no in-flight requests the termination future must already be complete.
    final CompletableFuture<Void> termination = inFlightRequestTracker.awaitAsync();
    assertThat(termination).isDone();
}
/**
 * Returns the singleton {@link DataflowRunnerInfo}, created lazily on first
 * access via the {@code LazyInit} holder class.
 */
public static DataflowRunnerInfo getDataflowRunnerInfo() {
    return LazyInit.INSTANCE;
}
@Test
public void getDataflowRunnerInfo() throws Exception {
    // Smoke-tests the values loaded into DataflowRunnerInfo: version numbers
    // must be numeric and build-time property placeholders must have been
    // substituted (an unsubstituted value still contains the property name).
    DataflowRunnerInfo info = DataflowRunnerInfo.getDataflowRunnerInfo();
    String version = info.getLegacyEnvironmentMajorVersion();
    // Validate major version is a number
    assertTrue(
        String.format("Legacy environment major version number %s is not a number", version),
        version.matches("\\d+"));
    version = info.getFnApiEnvironmentMajorVersion();
    // Validate major version is a number
    assertTrue(
        String.format("FnAPI environment major version number %s is not a number", version),
        version.matches("\\d+"));
    // Validate container versions do not contain the property name.
    assertThat(
        "legacy container version invalid",
        info.getFnApiDevContainerVersion(),
        not(containsString("dataflow.legacy_container_version")));
    assertThat(
        "FnAPI container version invalid",
        info.getLegacyDevContainerVersion(),
        not(containsString("dataflow.fnapi_container_version")));
    // Validate container base repository does not contain the property name
    // (indicating it was not filled in).
    assertThat(
        "container repository invalid",
        info.getContainerImageBaseRepository(),
        not(containsString("dataflow.container_base_repository")));
    // System properties captured at build time must round-trip unchanged.
    for (String property :
        new String[] {"java.vendor", "java.version", "os.arch", "os.name", "os.version"}) {
        assertEquals(System.getProperty(property), info.getProperties().get(property));
    }
}
@Override // Return true if a report needs to be sent for the current round; false if it can be suppressed for this round. public boolean needToReportLocalUsage(long currentBytesUsed, long lastReportedBytes, long currentMessagesUsed, long lastReportedMessages, long lastReportTimeMSecsSinceEpoch) { // If we are about to go more than maxUsageReportSuppressRounds without reporting, send a report. long currentTimeMSecs = System.currentTimeMillis(); long mSecsSinceLastReport = currentTimeMSecs - lastReportTimeMSecsSinceEpoch; if (mSecsSinceLastReport >= ResourceGroupService.maxIntervalForSuppressingReportsMSecs) { return true; } // If the percentage change (increase or decrease) in usage is more than a threshold for // either bytes or messages, send a report. final float toleratedDriftPercentage = ResourceGroupService.UsageReportSuppressionTolerancePercentage; if (currentBytesUsed > 0) { long diff = abs(currentBytesUsed - lastReportedBytes); float diffPercentage = (float) diff * 100 / lastReportedBytes; if (diffPercentage > toleratedDriftPercentage) { return true; } } if (currentMessagesUsed > 0) { long diff = abs(currentMessagesUsed - lastReportedMessages); float diffPercentage = (float) diff * 100 / lastReportedMessages; if (diffPercentage > toleratedDriftPercentage) { return true; } } return false; }
@Test public void testNeedToReportLocalUsage() { // If the percentage change (increase or decrease) in usage is more than 5% for // either bytes or messages, send a report. Assert.assertFalse(rqCalc.needToReportLocalUsage(1040, 1000, 104, 100, System.currentTimeMillis())); Assert.assertFalse(rqCalc.needToReportLocalUsage(950, 1000, 95, 100, System.currentTimeMillis())); Assert.assertTrue(rqCalc.needToReportLocalUsage(1060, 1000, 106, 100, System.currentTimeMillis())); Assert.assertTrue(rqCalc.needToReportLocalUsage(940, 1000, 94, 100, System.currentTimeMillis())); }
/**
 * Builds the provider-specific file name for the root of the given VFS
 * connection, by transforming the connection's PVFS root name (e.g.
 * {@code "pvfs://my-connection"}) into the provider scheme form (e.g.
 * {@code "s3://root-path-bucket"}).
 *
 * @throws KettleException if the transformer cannot produce a provider name,
 *         e.g. when the connection has no usable root path.
 */
@VisibleForTesting
@NonNull
<T extends VFSConnectionDetails> FileName getConnectionRootProviderFileName(
    @NonNull VFSConnectionFileNameTransformer<T> fileNameTransformer,
    @NonNull T details )
    throws KettleException {
    // Example: "pvfs://my-connection"
    ConnectionFileName rootPvfsFileName = getConnectionRootFileName( details );
    // Example: "s3://root-path-bucket"
    // Would fail if only "s3://"
    return fileNameTransformer.toProviderFileName( rootPvfsFileName, details );
}
// A root path containing relative segments that escape the connection root
// ("../...") must be rejected with a KettleException during transformation.
@Test( expected = KettleException.class )
public void testGetConnectionRootProviderFileNameThrowsWhenRootPathHasInvalidRelativeSegments() throws KettleException {
    when( vfsConnectionDetails.getRootPath() ).thenReturn( "../other-connection" );
    vfsConnectionManagerHelper
        .getConnectionRootProviderFileName( vfsConnectionFileNameTransformer, vfsConnectionDetails );
}
public static OutputStreamAndPath createEntropyAware( FileSystem fs, Path path, WriteMode writeMode) throws IOException { final Path processedPath = addEntropy(fs, path); // create the stream on the original file system to let the safety net // take its effect final FSDataOutputStream out = fs.create(processedPath, writeMode); return new OutputStreamAndPath(out, processedPath); }
@Test
void testWithSafetyNet() throws Exception {
    // Entropy injection must replace the key segment of the path with the
    // configured value, and the safety net must close any stream still open
    // when the thread's safety net is torn down.
    final String entropyKey = "__ekey__";
    final String entropyValue = "abc";
    final File folder = TempDirUtils.newFolder(tempFolder);
    final Path path = new Path(Path.fromLocalFile(folder), entropyKey + "/path/");
    final Path pathWithEntropy = new Path(Path.fromLocalFile(folder), entropyValue + "/path/");
    TestEntropyInjectingFs efs = new TestEntropyInjectingFs(entropyKey, entropyValue);
    FSDataOutputStream out;
    // Order matters: the safety net must be initialized before wrapping.
    FileSystemSafetyNet.initializeSafetyNetForThread();
    FileSystem fs = FileSystemSafetyNet.wrapWithSafetyNetWhenActivated(efs);
    try {
        OutputStreamAndPath streamAndPath =
                EntropyInjector.createEntropyAware(fs, path, WriteMode.NO_OVERWRITE);
        out = streamAndPath.stream();
        // The returned path must have the entropy key substituted.
        assertThat(streamAndPath.path()).isEqualTo(pathWithEntropy);
    } finally {
        FileSystemSafetyNet.closeSafetyNetAndGuardedResourcesForThread();
    }
    // check that the safety net closed the stream
    assertThatThrownBy(
            () -> {
                out.write(42);
                out.flush();
            })
        .isInstanceOf(IOException.class);
}
/**
 * Renders a comment, replacing every regex match with a rendered tracking
 * link and HTML-escaping the rest. Blank input renders as empty; when no
 * regex/link is configured the whole text is escaped; an invalid regex is
 * logged and the raw text returned unchanged.
 */
@Override
public String render(String text) {
    if (StringUtils.isBlank(text)) {
        return "";
    }
    if (regex.isEmpty() || link.isEmpty()) {
        // No substitution configured: escape and render the full message.
        Comment plain = new Comment();
        plain.escapeAndAdd(text);
        return plain.render();
    }
    try {
        // Pattern.compile sits inside the try so a bad regex is caught below.
        Matcher matcher = Pattern.compile(regex).matcher(text);
        Comment rendered = new Comment();
        int copiedUpTo = 0;
        while (hasMatch(matcher)) {
            // Escape the literal text between matches, then emit the link.
            rendered.escapeAndAdd(text.substring(copiedUpTo, matcher.start()));
            rendered.add(dynamicLink(matcher));
            copiedUpTo = matcher.end();
        }
        rendered.escapeAndAdd(text.substring(copiedUpTo));
        return rendered.render();
    } catch (PatternSyntaxException e) {
        LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
    }
    return text;
}
@Test
public void shouldReturnOriginalStringIfRegexIsIllegal() throws Exception {
    // "++" does not compile as a regex; the renderer must fall back to
    // returning the input unchanged instead of throwing.
    trackingTool = new DefaultCommentRenderer("http://mingle05/projects/cce/cards/${ID}", "++");
    String original = "evo-abc: checkin message";
    assertThat(trackingTool.render(original), is(original));
}
/**
 * Decorates an existing route context with routing for single (non-sharded)
 * tables referenced by the statement. When the statement involves no single
 * tables, the route engine factory yields empty and the context is untouched.
 */
@Override
public void decorateRouteContext(final RouteContext routeContext, final QueryContext queryContext, final ShardingSphereDatabase database,
                                 final SingleRule rule, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    // Collect only the tables managed by the single-table rule.
    Collection<QualifiedTable> singleTables = getSingleTables(database, rule, routeContext, sqlStatementContext);
    SingleRouteEngineFactory.newInstance(singleTables, sqlStatementContext.getSqlStatement()).ifPresent(optional -> optional.route(routeContext, rule));
}
@Test
void assertDecorateRouteContextWithSingleDataSource() {
    // With one configured data source, decorating must route to exactly it.
    SingleRule rule = new SingleRule(new SingleRuleConfiguration(),
        DefaultDatabase.LOGIC_NAME, new H2DatabaseType(), Collections.singletonMap("foo_ds", new MockedDataSource()), Collections.emptyList());
    RouteContext routeContext = new RouteContext();
    routeContext.getRouteUnits().add(new RouteUnit(new RouteMapper("foo_ds", "foo_ds"), Collections.singletonList(new RouteMapper("t_order", "t_order"))));
    // Look up the router registered for the single rule via ordered SPI.
    SingleSQLRouter sqlRouter = (SingleSQLRouter) OrderedSPILoader.getServices(SQLRouter.class, Collections.singleton(rule)).get(rule);
    sqlRouter.decorateRouteContext(
        routeContext, createQueryContext(), mockReadwriteSplittingDatabase(), rule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet));
    Iterator<String> routedDataSourceNames = routeContext.getActualDataSourceNames().iterator();
    assertThat(routedDataSourceNames.next(), is("foo_ds"));
}
/**
 * Transfers the whole {@code src} array into this buffer starting at
 * {@code index}. Convenience overload delegating to
 * {@code setBytes(index, src, 0, src.length)}.
 *
 * @return this buffer, for call chaining
 */
@Override
public ByteBuf setBytes(int index, byte[] src) {
    setBytes(index, src, 0, src.length);
    return this;
}
@Test
public void testSetBytesAfterRelease2() {
    // Copying from a live source buffer into a released buffer must fail
    // with IllegalReferenceCountException.
    final ByteBuf source = buffer();
    try {
        assertThrows(IllegalReferenceCountException.class, new Executable() {
            @Override
            public void execute() {
                releasedBuffer().setBytes(0, source, 1);
            }
        });
    } finally {
        // The source was never consumed; release it to avoid a leak.
        source.release();
    }
}
/**
 * Selects bundles to unload from brokers whose (historically averaged) max
 * resource usage exceeds the cluster average plus the configured threshold.
 * Overloaded brokers shed at least the throughput corresponding to their
 * excess usage; brokers below the minimum-throughput floor are skipped.
 * Falls back to lower-boundary shedding when enabled and nothing was picked.
 */
@Override
public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
    // Recomputed from scratch every shedding cycle.
    selectedBundlesCache.clear();
    final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;
    // Weighted-history average of max resource usage across all brokers.
    final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);
    if (avgUsage == 0) {
        log.warn("average max resource usage is 0");
        return selectedBundlesCache;
    }
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);
        // Only brokers above average + threshold are shedding candidates.
        if (currentUsage < avgUsage + threshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker,
                        localData.printResourceUsage());
            }
            return;
        }
        // Shed the excess over the threshold plus a small fixed margin.
        double percentOfTrafficToOffload =
                currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
        // Skip when the amount to shed is below the configured floor.
        if (minimumThroughputToOffload < minThroughputThreshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is planning to shed throughput {} MByte/s less than "
                                + "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})",
                        broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB,
                        localData.printResourceUsage());
            }
            return;
        }
        log.info(
                "Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
                        + " > {}% + {}% -- Offloading at least {} MByte/s of traffic,"
                        + " left throughput {} MByte/s ({})",
                broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold, minimumThroughputToOffload / MB,
                (brokerCurrentThroughput - minimumThroughputToOffload) / MB, localData.printResourceUsage());
        if (localData.getBundles().size() > 1) {
            // Pick concrete bundles to move, avoiding recently unloaded ones.
            filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData, minimumThroughputToOffload);
        } else if (localData.getBundles().size() == 1) {
            // A single bundle cannot be split off this broker by shedding.
            log.warn(
                    "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                            + "No Load Shedding will be done on this broker",
                    localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });
    // Optional second pass when the primary pass selected nothing.
    if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) {
        tryLowerBoundaryShedding(loadData, conf);
    }
    return selectedBundlesCache;
}
@Test
public void testNoBrokers() {
    // An empty load report must not select any bundle for unloading.
    final LoadData emptyLoadData = new LoadData();
    assertTrue(thresholdShedder.findBundlesForUnloading(emptyLoadData, conf).isEmpty());
}
static String determineFullyQualifiedClassName(Path baseDir, String basePackageName, Path classFile) { String subpackageName = determineSubpackageName(baseDir, classFile); String simpleClassName = determineSimpleClassName(classFile); return of(basePackageName, subpackageName, simpleClassName) .filter(value -> !value.isEmpty()) // default package .collect(joining(PACKAGE_SEPARATOR_STRING)); }
@Test
void determineFullyQualifiedClassNameFromRootPackage() {
    // Empty base package (default package): the FQN comes from the path alone.
    Path baseDir = Paths.get("path", "to");
    Path classFile = baseDir.resolve(Paths.get("com", "example", "app", "App.class"));
    assertEquals("com.example.app.App",
            ClasspathSupport.determineFullyQualifiedClassName(baseDir, "", classFile));
}
@VisibleForTesting public boolean isConfiguredResourceName(String resourceName) { // check configured Map<String, ResourceInformation> configuredResourceTypes = ResourceUtils.getResourceTypes(); if (!configuredResourceTypes.containsKey(resourceName)) { return false; } return true; }
@Test(timeout = 30000)
public void testRequestedResourceNameIsConfigured() {
    ResourcePluginManager rpm = new ResourcePluginManager();
    // An unknown resource name is rejected...
    assertThat(rpm.isConfiguredResourceName("a.com/a")).isFalse();
    // ...while a configured one is accepted.
    assertThat(rpm.isConfiguredResourceName("cmp.com/cmp")).isTrue();
}
/**
 * Validates that the given options satisfy the requirements declared by
 * {@code klass} (e.g. required options are set), returning the options as
 * {@code T} on success. Delegates to the three-argument overload with its
 * boolean flag set to {@code false} -- presumably the non-strict mode;
 * confirm against that overload's contract.
 */
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) {
    return validate(klass, options, false);
}
// A required option declared on a super-interface must still be enforced:
// validating Required with no value set must fail with the exact
// missing-required-value message (method signature plus description).
@Test
public void testWhenRequiredOptionIsNeverSetOnSuperInterface() {
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage(
        "Missing required value for "
            + "[public abstract java.lang.String org.apache.beam."
            + "sdk.options.PipelineOptionsValidatorTest$Required.getObject(), \"Fake Description\"].");
    PipelineOptions options = PipelineOptionsFactory.create();
    PipelineOptionsValidator.validate(Required.class, options);
}
/**
 * Returns whether the given {@code Content-Type} header value denotes a
 * {@code multipart/form-data} form submission.
 *
 * <p>Robustness fix: tokens are trimmed before comparison (header parameters
 * are commonly separated by {@code "; "}) and matched case-insensitively,
 * since media types are case-insensitive per RFC 7231 section 3.1.1.1.
 *
 * @param contentType the raw header value, e.g.
 *     {@code "multipart/form-data; boundary=..."}; may be {@code null}
 * @return true if any semicolon-separated part equals the
 *     {@code multipart/form-data} media type
 */
public static boolean isMultiPartForm(String contentType) {
    if (contentType == null) {
        return false;
    }
    for (String part : contentType.split(";")) {
        // Trim optional whitespace around each token; compare ignoring case.
        if ("multipart/form-data".equalsIgnoreCase(part.trim())) {
            return true;
        }
    }
    return false;
}
@Test
public void testIsMultipart() {
    // Null and unrelated content types must be rejected.
    for (String rejected : new String[] {null, "blah"}) {
        Assert.assertFalse("Wrongly identified \"multipart/form-data\"",
                MultipartFormDataParser.isMultiPartForm(rejected));
    }
    // The bare media type, a trailing separator, and a boundary parameter
    // must all be recognized.
    for (String accepted : new String[] {
            "multipart/form-data",
            "multipart/form-data;",
            "multipart/form-data; boundary=----WebKitFormBoundary8OOwv1Xp4c5XkBmD"}) {
        Assert.assertTrue("Failed to identify \"multipart/form-data\"",
                MultipartFormDataParser.isMultiPartForm(accepted));
    }
}
/**
 * Opens an output stream whose content is uploaded via the Storegate
 * resumable-upload protocol: an upload session is started, the stream feeds
 * a delayed HTTP entity that is PUT to the session location with a
 * Content-Range header, and the server's file metadata is returned. The
 * upload session is cancelled on any error reply or I/O failure.
 */
@Override
public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final DelayedHttpEntityCallable<File> command = new DelayedHttpEntityCallable<File>(file) {
        @Override
        public File call(final HttpEntity entity) throws BackgroundException {
            // Initiate a resumable upload
            String location;
            try {
                location = start(file, status);
            }
            catch(InteroperabilityException e) {
                // Retry without the lock token: the server may reject a lock
                // id it does not recognize. Rethrow when no lock was set.
                if(null == status.getLockId()) {
                    throw e;
                }
                location = start(file, status.withLockId(null));
            }
            final StoregateApiClient client = session.getClient();
            try {
                // Upload the file
                final HttpPut put = new HttpPut(location);
                put.setEntity(entity);
                final String header;
                if(status.getLength() == 0) {
                    // Touch: zero-length upload uses the "*/0" range form.
                    header = "*/0";
                }
                else {
                    final HttpRange range = HttpRange.byLength(0, status.getLength());
                    header = String.format("%d-%d/%d", range.getStart(), range.getEnd(), status.getLength());
                }
                put.addHeader(HttpHeaders.CONTENT_RANGE, String.format("bytes %s", header));
                final HttpResponse putResponse = client.getClient().execute(put);
                try {
                    switch(putResponse.getStatusLine().getStatusCode()) {
                        case HttpStatus.SC_OK:
                        case HttpStatus.SC_CREATED:
                            // Parse the returned metadata and remember the id.
                            final File result = new JSON().getContext(FileMetadata.class).readValue(
                                new InputStreamReader(putResponse.getEntity().getContent(), StandardCharsets.UTF_8), File.class);
                            fileid.cache(file, result.getId());
                            return result;
                        default:
                            throw new StoregateExceptionMappingService(fileid).map("Upload {0} failed",
                                new ApiException(putResponse.getStatusLine().getStatusCode(), putResponse.getStatusLine().getReasonPhrase(),
                                    Collections.emptyMap(), EntityUtils.toString(putResponse.getEntity())), file);
                    }
                }
                catch(BackgroundException e) {
                    // Cancel upload on error reply
                    cancel(file, location);
                    throw e;
                }
                finally {
                    EntityUtils.consume(putResponse.getEntity());
                }
            }
            catch(IOException e) {
                // Cancel upload on I/O failure
                cancel(file, location);
                throw new HttpExceptionMappingService().map("Upload {0} failed", e, file);
            }
        }

        @Override
        public long getContentLength() {
            return status.getLength();
        }
    };
    return this.write(file, status, command);
}
@Test
public void testWriteSingleByte() throws Exception {
    // Round-trips a minimal (single byte) upload through the write feature
    // and verifies the stored content with a subsequent read.
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final StoregateWriteFeature feature = new StoregateWriteFeature(session, nodeid);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
            EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final byte[] content = RandomUtils.nextBytes(1);
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final Path file = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final HttpResponseOutputStream<File> out = feature.write(file, status, new DisabledConnectionCallback());
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    assertEquals(content.length, IOUtils.copyLarge(in, out));
    in.close();
    out.close();
    // The server reply (file metadata) must be available after close.
    assertNotNull(out.getStatus());
    assertTrue(new DefaultFindFeature(session).find(file));
    // Read back and compare byte-for-byte.
    final byte[] compare = new byte[content.length];
    final InputStream stream = new StoregateReadFeature(session, nodeid).read(file,
        new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(file),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Handles SEARCH_OFFSET_BY_TIMESTAMP: resolves the queue offset closest to the
 * requested timestamp via the message store. Static-topic requests may be
 * answered directly by the rewrite step.
 */
private RemotingCommand searchOffsetByTimestamp(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(SearchOffsetResponseHeader.class);
    final SearchOffsetResponseHeader responseHeader = (SearchOffsetResponseHeader) response.readCustomHeader();
    final SearchOffsetRequestHeader requestHeader =
        (SearchOffsetRequestHeader) request.decodeCommandCustomHeader(SearchOffsetRequestHeader.class);

    // Static topics map logical queues onto physical ones; the rewrite step
    // may produce a complete response on its own.
    TopicQueueMappingContext mappingContext =
        this.brokerController.getTopicQueueMappingManager().buildTopicQueueMappingContext(requestHeader);
    RemotingCommand rewritten = rewriteRequestForStaticTopic(requestHeader, mappingContext);
    if (rewritten != null) {
        return rewritten;
    }

    // Ask the store for the offset nearest the timestamp on this queue.
    responseHeader.setOffset(this.brokerController.getMessageStore().getOffsetInQueueByTime(
        requestHeader.getTopic(), requestHeader.getQueueId(),
        requestHeader.getTimestamp(), requestHeader.getBoundaryType()));
    response.setCode(ResponseCode.SUCCESS);
    response.setRemark(null);
    return response;
}
@Test
public void testSearchOffsetByTimestamp() throws Exception {
    // The processor must answer SEARCH_OFFSET_BY_TIMESTAMP with SUCCESS,
    // passing the store-provided offset through regardless of its value.
    messageStore = mock(MessageStore.class);
    when(messageStore.getOffsetInQueueByTime(anyString(), anyInt(), anyLong(), any(BoundaryType.class))).thenReturn(Long.MIN_VALUE);
    when(brokerController.getMessageStore()).thenReturn(messageStore);
    SearchOffsetRequestHeader searchOffsetRequestHeader = new SearchOffsetRequestHeader();
    searchOffsetRequestHeader.setTopic("topic");
    searchOffsetRequestHeader.setQueueId(0);
    searchOffsetRequestHeader.setTimestamp(System.currentTimeMillis());
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.SEARCH_OFFSET_BY_TIMESTAMP, searchOffsetRequestHeader);
    // Ext fields mirror the header values as they would arrive over the wire.
    request.addExtField("topic", "topic");
    request.addExtField("queueId", "0");
    request.addExtField("timestamp", System.currentTimeMillis() + "");
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Reads one logical MySQL packet from the channel, reassembling packets that
 * were split at the maximum physical packet size. Returns {@code null} when
 * the remote side closed the connection or a packet body is truncated.
 *
 * @return the reassembled packet, flipped ready for reading, or null
 * @throws IOException on a packet sequence-id mismatch
 */
public ByteBuffer fetchOnePacket() throws IOException {
    int readLen;
    ByteBuffer result = defaultBuffer;
    result.clear();
    while (true) {
        // Each physical packet starts with a header: 3-byte length + 1-byte sequence id.
        headerByteBuffer.clear();
        readLen = readAll(headerByteBuffer);
        if (readLen != PACKET_HEADER_LEN) {
            // remote has close this channel
            LOG.info("Receive packet header failed, " + "remote {} may close the channel.", remoteHostPortString);
            return null;
        }
        if (packetId() != sequenceId) {
            LOG.warn("receive packet sequence id[" + packetId() + "] want to get[" + sequenceId + "]");
            throw new IOException("Bad packet sequence.");
        }
        int packetLen = packetLen();
        if ((result.capacity() - result.position()) < packetLen) {
            // byte buffer is not enough, new one packet
            ByteBuffer tmp;
            if (packetLen < MAX_PHYSICAL_PACKET_LENGTH) {
                // last packet, enough to this packet is OK.
                tmp = ByteBuffer.allocate(packetLen + result.position());
            } else {
                // already have packet, to allocate two packet.
                tmp = ByteBuffer.allocate(2 * packetLen + result.position());
            }
            // Carry over the bytes accumulated so far.
            tmp.put(result.array(), 0, result.position());
            result = tmp;
        }
        // read one physical packet
        // before read, set limit to make read only one packet
        result.limit(result.position() + packetLen);
        readLen = readAll(result);
        if (readLen != packetLen) {
            LOG.warn("Length of received packet content(" + readLen + ") is not equal with length in head.(" + packetLen + ")");
            return null;
        }
        accSequenceId();
        // A max-length physical packet signals that the logical packet
        // continues in the next physical packet; anything shorter ends it.
        if (packetLen != MAX_PHYSICAL_PACKET_LENGTH) {
            result.flip();
            break;
        }
    }
    return result;
}
@Test
public void testLongPacket() throws IOException {
    // mock: feed the channel a logical packet split into a full-length
    // physical packet followed by a 10-byte trailer, and verify reassembly.
    new Expectations() {
        {
            channel.read((ByteBuffer) any);
            minTimes = 0;
            result = new Delegate() {
                int fakeRead(ByteBuffer buffer) {
                    int maxLen = MysqlChannel.MAX_PHYSICAL_PACKET_LENGTH;
                    MysqlSerializer serializer = MysqlSerializer.newInstance();
                    if (readIdx == 0) {
                        // packet header #1: maximum length, next sequence id
                        readIdx++;
                        serializer.writeInt3(maxLen);
                        serializer.writeInt1(packetId++);
                        buffer.put(serializer.toArray());
                        return 4;
                    } else if (readIdx == 1) {
                        // body #1: repeating alphabet 'a'..'z'
                        readIdx++;
                        int readLen = buffer.remaining();
                        byte[] buf = new byte[readLen];
                        for (int i = 0; i < readLen; ++i) {
                            buf[i] = (byte) ('a' + (i % 26));
                        }
                        buffer.put(buf);
                        return readLen;
                    } else if (readIdx == 2) {
                        // packet header #2: trailing 10-byte packet
                        readIdx++;
                        serializer.writeInt3(10);
                        serializer.writeInt1(packetId++);
                        buffer.put(serializer.toArray());
                        return 4;
                    } else if (readIdx == 3) {
                        // body #2: continues the alphabet pattern seamlessly
                        readIdx++;
                        int readLen = buffer.remaining();
                        byte[] buf = new byte[readLen];
                        for (int i = 0; i < readLen; ++i) {
                            buf[i] = (byte) ('a' + (maxLen + i) % 26);
                        }
                        buffer.put(buf);
                        return readLen;
                    }
                    return 0;
                }
            };
        }
    };
    MysqlChannel channel1 = new MysqlChannel(channel);
    ByteBuffer buf = channel1.fetchOnePacket();
    // The reassembled logical packet is (16MB-1) + 10 bytes long.
    Assert.assertEquals(MysqlChannel.MAX_PHYSICAL_PACKET_LENGTH + 10, buf.remaining());
    for (int i = 0; i < 0xffffff - 1 + 10; ++i) {
        Assert.assertEquals('a' + (i % 26), buf.get());
    }
}
/**
 * Parses a P4Info protobuf file into a {@link PiPipelineModel}: counters,
 * meters, registers, action profiles, actions, controller packet metadata,
 * and finally the tables that tie those pieces together. A CRC32 of the raw
 * file is stored as the pipeline fingerprint.
 *
 * @param p4InfoUrl location of the P4Info file
 * @return the parsed pipeline model
 * @throws P4InfoParserException if the file cannot be read or parsed
 */
public static PiPipelineModel parse(URL p4InfoUrl) throws P4InfoParserException {
    final P4Info p4info;
    try {
        p4info = getP4InfoMessage(p4InfoUrl);
    } catch (IOException e) {
        throw new P4InfoParserException("Unable to parse protobuf " + p4InfoUrl.toString(), e);
    }
    // Generate fingerprint of the pipeline by hashing p4info file
    final int fingerprint;
    try {
        HashingInputStream hin = new HashingInputStream(Hashing.crc32(), p4InfoUrl.openStream());
        //noinspection StatementWithEmptyBody
        while (hin.read() != -1) {
            // Do nothing. Reading all input stream to update hash.
        }
        fingerprint = hin.hash().asInt();
    } catch (IOException e) {
        throw new P4InfoParserException("Unable to generate fingerprint " + p4InfoUrl.toString(), e);
    }
    // Start by parsing and mapping instances to their integer P4Info IDs.
    // Convenient to build the table model at the end.
    final String architecture = parseArchitecture(p4info);
    // Counters.
    final Map<Integer, PiCounterModel> counterMap = Maps.newHashMap();
    counterMap.putAll(parseCounters(p4info));
    counterMap.putAll(parseDirectCounters(p4info));
    // Meters.
    final Map<Integer, PiMeterModel> meterMap = Maps.newHashMap();
    meterMap.putAll(parseMeters(p4info));
    meterMap.putAll(parseDirectMeters(p4info));
    // Registers.
    final Map<Integer, PiRegisterModel> registerMap = Maps.newHashMap();
    registerMap.putAll(parseRegisters(p4info));
    // Action profiles.
    final Map<Integer, PiActionProfileModel> actProfileMap = parseActionProfiles(p4info);
    // Actions.
    final Map<Integer, PiActionModel> actionMap = parseActions(p4info);
    // Controller packet metadatas.
    final Map<PiPacketOperationType, PiPacketOperationModel> pktOpMap = parseCtrlPktMetadatas(p4info);
    // Finally, parse tables.
    final ImmutableMap.Builder<PiTableId, PiTableModel> tableImmMapBuilder =
            ImmutableMap.builder();
    for (Table tableMsg : p4info.getTablesList()) {
        final PiTableId tableId = PiTableId.of(tableMsg.getPreamble().getName());
        // Parse match fields. String-typed fields carry no defined bit width.
        final ImmutableMap.Builder<PiMatchFieldId, PiMatchFieldModel> tableFieldMapBuilder =
                ImmutableMap.builder();
        for (MatchField fieldMsg : tableMsg.getMatchFieldsList()) {
            final PiMatchFieldId fieldId = PiMatchFieldId.of(fieldMsg.getName());
            tableFieldMapBuilder.put(
                    fieldId,
                    new P4MatchFieldModel(fieldId,
                                          isFieldString(p4info, fieldMsg.getTypeName().getName())
                                                  ? P4MatchFieldModel.BIT_WIDTH_UNDEFINED
                                                  : fieldMsg.getBitwidth(),
                                          mapMatchFieldType(fieldMsg.getMatchType())));
        }
        // Retrieve action models by integer IDs.
        final ImmutableMap.Builder<PiActionId, PiActionModel> tableActionMapBuilder =
                ImmutableMap.builder();
        tableMsg.getActionRefsList().stream()
                .map(ActionRef::getId)
                .map(actionMap::get)
                .forEach(actionModel -> tableActionMapBuilder.put(actionModel.id(), actionModel));
        // Retrieve direct meters by integer IDs.
        final ImmutableMap.Builder<PiMeterId, PiMeterModel> tableMeterMapBuilder =
                ImmutableMap.builder();
        tableMsg.getDirectResourceIdsList()
                .stream()
                .map(meterMap::get)
                // Direct resource ID might be that of a counter.
                // Filter out missed mapping.
                .filter(Objects::nonNull)
                .forEach(meterModel -> tableMeterMapBuilder.put(meterModel.id(), meterModel));
        // Retrieve direct counters by integer IDs.
        final ImmutableMap.Builder<PiCounterId, PiCounterModel> tableCounterMapBuilder =
                ImmutableMap.builder();
        tableMsg.getDirectResourceIdsList()
                .stream()
                .map(counterMap::get)
                // As before, resource ID might be that of a meter.
                // Filter out missed mapping.
                .filter(Objects::nonNull)
                .forEach(counterModel -> tableCounterMapBuilder.put(counterModel.id(), counterModel));
        // Check if table supports one-shot only
        boolean oneShotOnly = isAnnotationPresent(ONE_SHOT_ONLY_ANNOTATION, tableMsg.getPreamble());
        tableImmMapBuilder.put(
                tableId,
                new P4TableModel(
                        PiTableId.of(tableMsg.getPreamble().getName()),
                        // Implementation id 0 means no action profile backs the table.
                        tableMsg.getImplementationId() == 0 ? PiTableType.DIRECT : PiTableType.INDIRECT,
                        actProfileMap.get(tableMsg.getImplementationId()),
                        tableMsg.getSize(),
                        tableCounterMapBuilder.build(),
                        tableMeterMapBuilder.build(),
                        !tableMsg.getIdleTimeoutBehavior()
                                .equals(Table.IdleTimeoutBehavior.NO_TIMEOUT),
                        tableFieldMapBuilder.build(),
                        tableActionMapBuilder.build(),
                        actionMap.get(tableMsg.getConstDefaultActionId()),
                        tableMsg.getIsConstTable(),
                        oneShotOnly));
    }
    // Get a map with proper PI IDs for some of those maps we created at the beginning.
    ImmutableMap<PiCounterId, PiCounterModel> counterImmMap = ImmutableMap.copyOf(
            counterMap.values().stream()
                    .collect(Collectors.toMap(PiCounterModel::id, c -> c)));
    ImmutableMap<PiMeterId, PiMeterModel> meterImmMap = ImmutableMap.copyOf(
            meterMap.values().stream()
                    .collect(Collectors.toMap(PiMeterModel::id, m -> m)));
    ImmutableMap<PiRegisterId, PiRegisterModel> registerImmMap = ImmutableMap.copyOf(
            registerMap.values().stream()
                    .collect(Collectors.toMap(PiRegisterModel::id, r -> r)));
    ImmutableMap<PiActionProfileId, PiActionProfileModel> actProfileImmMap = ImmutableMap.copyOf(
            actProfileMap.values().stream()
                    .collect(Collectors.toMap(PiActionProfileModel::id, a -> a)));
    return new P4PipelineModel(
            tableImmMapBuilder.build(),
            counterImmMap,
            meterImmMap,
            registerImmMap,
            actProfileImmMap,
            ImmutableMap.copyOf(pktOpMap),
            architecture,
            fingerprint);
}
/**
 * End-to-end check of {@code P4InfoParser.parse}: parses the same p4info file twice,
 * verifies the two models are equal, then cross-checks tables, match fields, actions,
 * action profiles, counters, meters and packet operations against the raw P4Info message.
 */
@Test
public void testParse() throws Exception {
    // Generate two PiPipelineModels from the same p4Info file
    PiPipelineModel model = P4InfoParser.parse(p4InfoUrl);
    PiPipelineModel sameAsModel = P4InfoParser.parse(p4InfoUrl);
    // Check equality
    new EqualsTester().addEqualityGroup(model, sameAsModel).testEquals();
    // Generate a P4Info object from the file
    final P4Info p4info;
    try {
        p4info = getP4InfoMessage(p4InfoUrl);
    } catch (IOException e) {
        throw new P4InfoParserException("Unable to parse protobuf " + p4InfoUrl.toString(), e);
    }
    List<Table> tableMsgs = p4info.getTablesList();
    // Table ids are taken from the raw message preambles, in declaration order.
    PiTableId table0Id = PiTableId.of(tableMsgs.get(0).getPreamble().getName());
    PiTableId wcmpTableId = PiTableId.of(tableMsgs.get(1).getPreamble().getName());
    PiTableId wcmpTableOneShotId = PiTableId.of(tableMsgs.get(2).getPreamble().getName());
    //parse tables
    PiTableModel table0Model = model.table(table0Id).orElse(null);
    PiTableModel wcmpTableModel = model.table(wcmpTableId).orElse(null);
    PiTableModel wcmpTableOneShotModel = model.table(wcmpTableOneShotId).orElse(null);
    PiTableModel table0Model2 = sameAsModel.table(table0Id).orElse(null);
    PiTableModel wcmpTableModel2 = sameAsModel.table(wcmpTableId).orElse(null);
    new EqualsTester().addEqualityGroup(table0Model, table0Model2)
            .addEqualityGroup(wcmpTableModel, wcmpTableModel2).testEquals();
    // Check existence
    assertThat("model parsed value is null", table0Model, notNullValue());
    assertThat("model parsed value is null", wcmpTableModel, notNullValue());
    assertThat("model parsed value is null", wcmpTableOneShotModel, notNullValue());
    assertThat("Incorrect size for table0 size", table0Model.maxSize(), is(equalTo(DEFAULT_MAX_TABLE_SIZE)));
    assertThat("Incorrect size for wcmp_table size", wcmpTableModel.maxSize(), is(equalTo(DEFAULT_MAX_TABLE_SIZE)));
    assertThat("Incorrect size for wcmp_table_one_shot size", wcmpTableOneShotModel.maxSize(), is(equalTo(DEFAULT_MAX_TABLE_SIZE)));
    // Check one-shot annotation
    assertThat("error parsing one-shot annotation", wcmpTableModel.oneShotOnly(), is(false));
    assertThat("error parsing one-shot annotation", wcmpTableOneShotModel.oneShotOnly(), is(true));
    // Check matchFields: rebuild the expected field models from the raw message.
    List<MatchField> matchFieldList = tableMsgs.get(0).getMatchFieldsList();
    List<PiMatchFieldModel> piMatchFieldList = new ArrayList<>();
    for (MatchField matchFieldIter : matchFieldList) {
        MatchField.MatchType matchType = matchFieldIter.getMatchType();
        PiMatchType piMatchType;
        switch (matchType) {
            case EXACT: piMatchType = PiMatchType.EXACT; break;
            case LPM: piMatchType = PiMatchType.LPM; break;
            case TERNARY: piMatchType = PiMatchType.TERNARY; break;
            case RANGE: piMatchType = PiMatchType.RANGE; break;
            default: Assert.fail(); return;
        }
        piMatchFieldList.add(new P4MatchFieldModel(PiMatchFieldId.of(matchFieldIter.getName()),
                                                  matchFieldIter.getBitwidth(), piMatchType));
    }
    // Check MatchFields size
    assertThat("Incorrect size for matchFields", table0Model.matchFields().size(), is(equalTo(9)));
    // Check if matchFields are in order
    assertThat("Incorrect order for matchFields", table0Model.matchFields(), IsIterableContainingInOrder.contains(
            piMatchFieldList.get(0), piMatchFieldList.get(1),
            piMatchFieldList.get(2), piMatchFieldList.get(3),
            piMatchFieldList.get(4), piMatchFieldList.get(5),
            piMatchFieldList.get(6), piMatchFieldList.get(7),
            piMatchFieldList.get(8)));
    assertThat("Incorrect size for matchFields", wcmpTableModel.matchFields().size(), is(equalTo(1)));
    // check if matchFields are in order
    matchFieldList = tableMsgs.get(1).getMatchFieldsList();
    assertThat("Incorrect order for matchFields", wcmpTableModel.matchFields(), IsIterableContainingInOrder.contains(
            new P4MatchFieldModel(PiMatchFieldId.of(matchFieldList.get(0).getName()),
                                  matchFieldList.get(0).getBitwidth(), PiMatchType.EXACT)));
    //check table0 actionsRefs
    List<ActionRef> actionRefList = tableMsgs.get(0).getActionRefsList();
    assertThat("Incorrect size for actionRefs", actionRefList.size(), is(equalTo(4)));
    //create action instances
    PiActionId actionId = PiActionId.of("set_egress_port");
    PiActionParamId piActionParamId = PiActionParamId.of("port");
    int bitWitdth = 9;
    PiActionParamModel actionParamModel = new P4ActionParamModel(piActionParamId, bitWitdth);
    ImmutableMap<PiActionParamId, PiActionParamModel> params =
            new ImmutableMap.Builder<PiActionParamId, PiActionParamModel>()
                    .put(piActionParamId, actionParamModel).build();
    PiActionModel setEgressPortAction = new P4ActionModel(actionId, params);
    actionId = PiActionId.of("send_to_cpu");
    PiActionModel sendToCpuAction = new P4ActionModel(actionId,
            new ImmutableMap.Builder<PiActionParamId, PiActionParamModel>().build());
    actionId = PiActionId.of("_drop");
    PiActionModel dropAction = new P4ActionModel(actionId,
            new ImmutableMap.Builder<PiActionParamId, PiActionParamModel>().build());
    actionId = PiActionId.of("NoAction");
    PiActionModel noAction = new P4ActionModel(actionId,
            new ImmutableMap.Builder<PiActionParamId, PiActionParamModel>().build());
    actionId = PiActionId.of("table0_control.set_next_hop_id");
    piActionParamId = PiActionParamId.of("next_hop_id");
    bitWitdth = 16;
    actionParamModel = new P4ActionParamModel(piActionParamId, bitWitdth);
    params = new ImmutableMap.Builder<PiActionParamId, PiActionParamModel>()
            .put(piActionParamId, actionParamModel).build();
    PiActionModel setNextHopIdAction = new P4ActionModel(actionId, params);
    //check table0 actions
    assertThat("action dose not match", table0Model.actions(), IsIterableContainingInAnyOrder.containsInAnyOrder(
            setEgressPortAction, sendToCpuAction, setNextHopIdAction, dropAction));
    //check wcmp_table actions
    assertThat("actions dose not match", wcmpTableModel.actions(), IsIterableContainingInAnyOrder.containsInAnyOrder(
            setEgressPortAction, noAction));
    PiActionModel table0DefaultAction = table0Model.constDefaultAction().orElse(null);
    new EqualsTester().addEqualityGroup(table0DefaultAction, dropAction).testEquals();
    // Check existence
    assertThat("model parsed value is null", table0DefaultAction, notNullValue());
    //parse action profiles
    PiTableId tableId = PiTableId.of("wcmp_control.wcmp_table");
    ImmutableSet<PiTableId> tableIds = new ImmutableSet.Builder<PiTableId>().add(tableId).build();
    PiActionProfileId actionProfileId = PiActionProfileId.of("wcmp_control.wcmp_selector");
    PiActionProfileModel wcmpSelector3 = new P4ActionProfileModel(actionProfileId, tableIds, true,
            DEFAULT_MAX_ACTION_PROFILE_SIZE, DEFAULT_MAX_GROUP_SIZE);
    PiActionProfileModel wcmpSelector = model.actionProfiles(actionProfileId).orElse(null);
    PiActionProfileModel wcmpSelector2 = sameAsModel.actionProfiles(actionProfileId).orElse(null);
    new EqualsTester().addEqualityGroup(wcmpSelector, wcmpSelector2, wcmpSelector3).testEquals();
    // Check existence
    assertThat("model parsed value is null", wcmpSelector, notNullValue());
    assertThat("Incorrect value for actions profiles", model.actionProfiles(), containsInAnyOrder(wcmpSelector));
    // ActionProfiles size
    assertThat("Incorrect size for action profiles", model.actionProfiles().size(), is(equalTo(1)));
    //parse counters
    PiCounterModel ingressPortCounterModel =
            model.counter(PiCounterId.of("port_counters_ingress.ingress_port_counter")).orElse(null);
    PiCounterModel egressPortCounterModel =
            model.counter(PiCounterId.of("port_counters_egress.egress_port_counter")).orElse(null);
    PiCounterModel table0CounterModel =
            model.counter(PiCounterId.of("table0_control.table0_counter")).orElse(null);
    PiCounterModel wcmpTableCounterModel =
            model.counter(PiCounterId.of("wcmp_control.wcmp_table_counter")).orElse(null);
    PiCounterModel ingressPortCounterModel2 =
            sameAsModel.counter(PiCounterId.of("port_counters_ingress.ingress_port_counter")).orElse(null);
    PiCounterModel egressPortCounterModel2 =
            sameAsModel.counter(PiCounterId.of("port_counters_egress.egress_port_counter")).orElse(null);
    PiCounterModel table0CounterModel2 =
            sameAsModel.counter(PiCounterId.of("table0_control.table0_counter")).orElse(null);
    PiCounterModel wcmpTableCounterModel2 =
            sameAsModel.counter(PiCounterId.of("wcmp_control.wcmp_table_counter")).orElse(null);
    new EqualsTester()
            .addEqualityGroup(ingressPortCounterModel, ingressPortCounterModel2)
            .addEqualityGroup(egressPortCounterModel, egressPortCounterModel2)
            .addEqualityGroup(table0CounterModel, table0CounterModel2)
            .addEqualityGroup(wcmpTableCounterModel, wcmpTableCounterModel2)
            .testEquals();
    assertThat("model parsed value is null", ingressPortCounterModel, notNullValue());
    assertThat("model parsed value is null", egressPortCounterModel, notNullValue());
    assertThat("model parsed value is null", table0CounterModel, notNullValue());
    assertThat("model parsed value is null", wcmpTableCounterModel, notNullValue());
    //Parse meters — this pipeline declares none, so both models must report empty.
    Collection<PiMeterModel> meterModel = model.meters();
    Collection<PiMeterModel> meterModel2 = sameAsModel.meters();
    assertThat("model parsed meter collection should be empty", meterModel.isEmpty(), is(true));
    assertThat("model parsed meter collection should be empty", meterModel2.isEmpty(), is(true));
    //parse packet operations
    PiPacketOperationModel packetInOperationalModel =
            model.packetOperationModel(PiPacketOperationType.PACKET_IN).orElse(null);
    PiPacketOperationModel packetOutOperationalModel =
            model.packetOperationModel(PiPacketOperationType.PACKET_OUT).orElse(null);
    PiPacketOperationModel packetInOperationalModel2 =
            sameAsModel.packetOperationModel(PiPacketOperationType.PACKET_IN).orElse(null);
    PiPacketOperationModel packetOutOperationalModel2 =
            sameAsModel.packetOperationModel(PiPacketOperationType.PACKET_OUT).orElse(null);
    new EqualsTester()
            .addEqualityGroup(packetInOperationalModel, packetInOperationalModel2)
            .addEqualityGroup(packetOutOperationalModel, packetOutOperationalModel2)
            .testEquals();
    // Check existence
    assertThat("model parsed value is null", packetInOperationalModel, notNullValue());
    assertThat("model parsed value is null", packetOutOperationalModel, notNullValue());
}
/**
 * Returns an iterator that yields every integer in the half-open range
 * [from, to), in ascending order.
 */
@Override
public Iterator<Integer> iterator() {
    return new Iterator<Integer>() {
        // Next value to hand out; starts at the inclusive lower bound.
        private int cursor = from;

        @Override
        public boolean hasNext() {
            return cursor < to;
        }

        @Override
        public Integer next() {
            if (cursor >= to) {
                throw new NoSuchElementException();
            }
            int value = cursor;
            cursor = value + 1;
            return value;
        }
    };
}
/** Walks the whole [5, 10) range and verifies exhaustion behavior. */
@Test
void testIterator() {
    RangeSet rangeSet = new RangeSet(5, 10);
    Iterator<Integer> it = rangeSet.iterator();
    int expected = 5;
    while (expected < 10) {
        assertTrue(it.hasNext());
        assertEquals(expected, it.next());
        expected++;
    }
    // Once exhausted: hasNext() is false and next() must throw.
    assertFalse(it.hasNext());
    assertThrows(NoSuchElementException.class, it::next);
}
/**
 * Marks every unread notification of the given user as read.
 *
 * @param userId   id of the user whose messages are updated
 * @param userType user type discriminator (admin/member)
 * @return number of rows the mapper actually updated
 */
@Override
public int updateAllNotifyMessageRead(Long userId, Integer userType) {
    // Single mapper call performs the bulk update and reports the row count.
    final int updatedRows = notifyMessageMapper.updateListRead(userId, userType);
    return updatedRows;
}
@Test public void testUpdateAllNotifyMessageRead() { // mock 数据 NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // 等会查询到 o.setUserId(1L); o.setUserType(UserTypeEnum.ADMIN.getValue()); o.setReadStatus(false); o.setReadTime(null); o.setTemplateParams(randomTemplateParams()); }); notifyMessageMapper.insert(dbNotifyMessage); // 测试 userId 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L))); // 测试 userType 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue()))); // 测试 readStatus 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(true))); // 准备参数 Long userId = 1L; Integer userType = UserTypeEnum.ADMIN.getValue(); // 调用 int updateCount = notifyMessageService.updateAllNotifyMessageRead(userId, userType); // 断言 assertEquals(1, updateCount); NotifyMessageDO notifyMessage = notifyMessageMapper.selectById(dbNotifyMessage.getId()); assertTrue(notifyMessage.getReadStatus()); assertNotNull(notifyMessage.getReadTime()); }
/**
 * 31-based fold over key, value and TTL; produces exactly the same value
 * as before (Long.hashCode(v) == (int) (v ^ (v >>> 32))).
 */
@Override
public int hashCode() {
    int hash = key == null ? 0 : key.hashCode();
    hash = 31 * hash + (value == null ? 0 : value.hashCode());
    hash = 31 * hash + Long.hashCode(ttlMillis);
    return hash;
}
@Test
public void testHashCode() {
    // hashCode() must be deterministic and consistent with equals():
    // the same instance and an equal-attribute copy hash identically.
    assertEquals(replicatedRecord.hashCode(), replicatedRecord.hashCode());
    assertEquals(replicatedRecord.hashCode(), replicatedRecordSameAttributes.hashCode());
    // Unequal objects are not *required* to have distinct hashes, so the
    // inequality checks below only run when distinct hashes are expected.
    assumeDifferentHashCodes();
    assertNotEquals(replicatedRecord.hashCode(), replicatedRecordOtherKey.hashCode());
    assertNotEquals(replicatedRecord.hashCode(), replicatedRecordOtherValue.hashCode());
    assertNotEquals(replicatedRecord.hashCode(), replicatedRecordOtherTtl.hashCode());
}
/**
 * This coder only adds a fixed-size timestamp prefix, so whether observing
 * the byte size is cheap is decided entirely by the wrapped window coder.
 */
@Override
public boolean isRegisterByteSizeObserverCheap(T value) {
    final boolean delegateIsCheap = windowCoder.isRegisterByteSizeObserverCheap(value);
    return delegateIsCheap;
}
/** Cheapness of the prefixing coder must track the wrapped coder's answer. */
@Test
public void testIsRegisterByteSizeObserverCheap() {
    CustomWindow window = CUSTOM_WINDOW_LIST.get(0);

    TimestampPrefixingWindowCoder<CustomWindow> cheapDelegate =
        TimestampPrefixingWindowCoder.of(CustomWindowCoder.of(true, true));
    assertThat(cheapDelegate.isRegisterByteSizeObserverCheap(window), equalTo(true));

    TimestampPrefixingWindowCoder<CustomWindow> expensiveDelegate =
        TimestampPrefixingWindowCoder.of(CustomWindowCoder.of(true, false));
    assertThat(expensiveDelegate.isRegisterByteSizeObserverCheap(window), equalTo(false));
}
/**
 * Converts a Flink table expression filter into an equivalent Iceberg
 * {@code Expression}, if the operation is supported. Returns
 * {@code Optional.empty()} for anything that cannot be mapped (non-call
 * expressions, unknown functions, unsupported child shapes).
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
  // Only function-call expressions (e.g. "a > 1", "x IS NULL") are convertible.
  if (!(flinkExpression instanceof CallExpression)) {
    return Optional.empty();
  }
  CallExpression call = (CallExpression) flinkExpression;
  Operation op = FILTERS.get(call.getFunctionDefinition());
  if (op != null) {
    switch (op) {
      case IS_NULL:
        // Expect exactly one field-reference child: "field IS NULL".
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::isNull);
      case NOT_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::notNull);
      // For comparisons, two converters are supplied: the second is the
      // mirrored operator — presumably used when the literal appears on the
      // left-hand side ("3 > x" == "x < 3"); verify in convertFieldAndLiteral.
      case LT:
        return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
      case LT_EQ:
        return convertFieldAndLiteral(
            Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
      case GT:
        return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
      case GT_EQ:
        return convertFieldAndLiteral(
            Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
      case EQ:
        // NaN never compares equal, so equality against NaN is expressed as isNaN.
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.isNaN(ref);
              } else {
                return Expressions.equal(ref, lit);
              }
            },
            call);
      case NOT_EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.notNaN(ref);
              } else {
                return Expressions.notEqual(ref, lit);
              }
            },
            call);
      case NOT:
        // NOT wraps a single convertible child expression.
        return onlyChildAs(call, CallExpression.class)
            .flatMap(FlinkFilters::convert)
            .map(Expressions::not);
      case AND:
        return convertLogicExpression(Expressions::and, call);
      case OR:
        return convertLogicExpression(Expressions::or, call);
      case STARTS_WITH:
        return convertLike(call);
    }
  }
  return Optional.empty();
}
/** "field1 IS NULL" must convert to Iceberg's isNull predicate. */
@Test
public void testIsNull() {
    Expression flinkExpr = resolve(Expressions.$("field1").isNull());

    Optional<org.apache.iceberg.expressions.Expression> converted = FlinkFilters.convert(flinkExpr);
    assertThat(converted).isPresent();

    UnboundPredicate<Object> expected = org.apache.iceberg.expressions.Expressions.isNull("field1");
    assertPredicatesMatch(expected, converted.get());
}
static List<RawMetric> constructMetricsList(ObjectName jmxMetric, MBeanAttributeInfo[] attributes, Object[] attrValues) { String domain = fixIllegalChars(jmxMetric.getDomain()); LinkedHashMap<String, String> labels = getLabelsMap(jmxMetric); String firstLabel = labels.keySet().iterator().next(); String firstLabelValue = fixIllegalChars(labels.get(firstLabel)); labels.remove(firstLabel); //removing first label since it's value will be in name List<RawMetric> result = new ArrayList<>(attributes.length); for (int i = 0; i < attributes.length; i++) { String attrName = fixIllegalChars(attributes[i].getName()); convertNumericValue(attrValues[i]).ifPresent(convertedValue -> { String name = String.format("%s_%s_%s", domain, firstLabelValue, attrName); var metric = RawMetric.create(name, labels, convertedValue); result.add(metric); }); } return result; }
/**
 * Verifies the jmx-exporter naming convention: domain and the first bean
 * property value go into the metric name (illegal chars replaced with '_'),
 * the remaining properties become labels, and non-numeric attributes are dropped.
 */
@Test
void convertsJmxMetricsAccordingToJmxExporterFormat() throws Exception {
    List<RawMetric> metrics = JmxMetricsFormatter.constructMetricsList(
        new ObjectName(
            "kafka.server:type=Some.BrokerTopic-Metrics,name=BytesOutPer-Sec,topic=test,some-lbl=123"),
        new MBeanAttributeInfo[] {
            createMbeanInfo("FifteenMinuteRate"),
            createMbeanInfo("Mean"),
            createMbeanInfo("Calls-count"),
            createMbeanInfo("SkipValue"),
        },
        // Last value is a String, so only three metrics are expected.
        new Object[] { 123.0, 100.0, 10L, "string values not supported" }
    );
    Assertions.assertThat(metrics).hasSize(3);
    // Name embeds domain + first property ("type") value; dots/dashes -> underscores.
    assertMetricsEqual(
        RawMetric.create(
            "kafka_server_Some_BrokerTopic_Metrics_FifteenMinuteRate",
            Map.of("name", "BytesOutPer-Sec", "topic", "test", "some_lbl", "123"),
            BigDecimal.valueOf(123.0)
        ),
        metrics.get(0)
    );
    assertMetricsEqual(
        RawMetric.create(
            "kafka_server_Some_BrokerTopic_Metrics_Mean",
            Map.of("name", "BytesOutPer-Sec", "topic", "test", "some_lbl", "123"),
            BigDecimal.valueOf(100.0)
        ),
        metrics.get(1)
    );
    assertMetricsEqual(
        RawMetric.create(
            "kafka_server_Some_BrokerTopic_Metrics_Calls_count",
            Map.of("name", "BytesOutPer-Sec", "topic", "test", "some_lbl", "123"),
            BigDecimal.valueOf(10)
        ),
        metrics.get(2)
    );
}
/**
 * Creates a directory at {@code file}, dispatching on the file's backing
 * storage (SFTP/FTP, SMB, OTG, SAF document, cloud providers, local/root).
 * Runs asynchronously; the outcome is reported exclusively through
 * {@code errorCallBack} (done/exists/invalidName/launchSAF).
 *
 * @param parentFile    parent of the directory to create (used for OTG/SAF lookups)
 * @param file          the directory to create
 * @param context       Android context for name/path resolution
 * @param rootMode      whether to fall back to a root shell when the normal
 *                      local create fails
 * @param errorCallBack receives exactly one terminal callback per invocation
 */
public static void mkdir(
    final HybridFile parentFile,
    @NonNull final HybridFile file,
    final Context context,
    final boolean rootMode,
    @NonNull final ErrorCallBack errorCallBack) {
  new AsyncTask<Void, Void, Void>() {

    private DataUtils dataUtils = DataUtils.getInstance();

    // Shared SAF helper: creates the directory under the given DocumentFile
    // (or reports failure when the parent is missing / not a directory).
    private Function<DocumentFile, Void> safCreateDirectory =
        input -> {
          if (input != null && input.isDirectory()) {
            boolean result = false;
            try {
              result = input.createDirectory(file.getName(context)) != null;
            } catch (Exception e) {
              LOG.warn("Failed to make directory", e);
            }
            errorCallBack.done(file, result);
          } else errorCallBack.done(file, false);
          return null;
        };

    @Override
    protected Void doInBackground(Void... params) {
      // checking whether filename is valid or a recursive call possible
      if (!Operations.isFileNameValid(file.getName(context))) {
        errorCallBack.invalidName(file);
        return null;
      }
      if (file.exists()) {
        errorCallBack.exists(file);
        return null;
      }
      // Android data directory, prohibit create directory
      if (file.isAndroidDataDir()) {
        errorCallBack.done(file, false);
        return null;
      }
      // SFTP/FTP: mkdir has no error reporting yet, so success is assumed.
      if (file.isSftp() || file.isFtp()) {
        file.mkdir(context);
        /* FIXME: throw Exceptions from HybridFile.mkdir() so errorCallback can throw Exceptions here */
        errorCallBack.done(file, true);
        return null;
      }
      // SMB: create recursively; success is re-checked via exists().
      if (file.isSmb()) {
        try {
          file.getSmbFile(2000).mkdirs();
        } catch (SmbException e) {
          LOG.warn("failed to make smb directories", e);
          errorCallBack.done(file, false);
          return null;
        }
        errorCallBack.done(file, file.exists());
        return null;
      }
      // USB OTG storage via the Storage Access Framework.
      if (file.isOtgFile()) {
        if (checkOtgNewFileExists(file, context)) {
          errorCallBack.exists(file);
          return null;
        }
        safCreateDirectory.apply(OTGUtil.getDocumentFile(parentFile.getPath(), context, false));
        return null;
      }
      // Generic SAF document tree.
      if (file.isDocumentFile()) {
        if (checkDocumentFileNewFileExists(file, context)) {
          errorCallBack.exists(file);
          return null;
        }
        safCreateDirectory.apply(
            OTGUtil.getDocumentFile(
                parentFile.getPath(),
                SafRootHolder.getUriRoot(),
                context,
                OpenMode.DOCUMENT_FILE,
                false));
        return null;
      }
      // Cloud providers: each delegates to the corresponding account client.
      else if (file.isDropBoxFile()) {
        CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX);
        try {
          cloudStorageDropbox.createFolder(CloudUtil.stripPath(OpenMode.DROPBOX, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else if (file.isBoxFile()) {
        CloudStorage cloudStorageBox = dataUtils.getAccount(OpenMode.BOX);
        try {
          cloudStorageBox.createFolder(CloudUtil.stripPath(OpenMode.BOX, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else if (file.isOneDriveFile()) {
        CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE);
        try {
          cloudStorageOneDrive.createFolder(
              CloudUtil.stripPath(OpenMode.ONEDRIVE, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else if (file.isGoogleDriveFile()) {
        CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE);
        try {
          cloudStorageGdrive.createFolder(CloudUtil.stripPath(OpenMode.GDRIVE, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else {
        // Local (or root-only) filesystem.
        if (file.isLocal() || file.isRoot()) {
          // mode 2 => writable only through SAF; hand control back to the UI.
          int mode = checkFolder(new File(file.getParent(context)), context);
          if (mode == 2) {
            errorCallBack.launchSAF(file);
            return null;
          }
          if (mode == 1 || mode == 0) MakeDirectoryOperation.mkdir(file.getFile(), context);
          // Normal create failed: optionally retry through a root shell.
          if (!file.exists() && rootMode) {
            file.setMode(OpenMode.ROOT);
            // NOTE(review): when the path already exists in root view, exists()
            // is reported but the shell mkdir is still attempted — confirm intended.
            if (file.exists()) errorCallBack.exists(file);
            try {
              MakeDirectoryCommand.INSTANCE.makeDirectory(
                  file.getParent(context), file.getName(context));
            } catch (ShellNotRunningException e) {
              LOG.warn("failed to make directory in local filesystem", e);
            }
            errorCallBack.done(file, file.exists());
            return null;
          }
          errorCallBack.done(file, file.exists());
          return null;
        }
        // Unknown storage type: report based on a final existence check.
        errorCallBack.done(file, file.exists());
      }
      return null;
    }
  }.executeOnExecutor(executor);
}
/** mkdir on a local path must create the folder; completion is signalled via the callback. */
@Test
public void testMkdir() throws InterruptedException {
    File newFolder = new File(storageRoot, "test");
    HybridFile target = new HybridFile(OpenMode.FILE, newFolder.getAbsolutePath());
    CountDownLatch completion = new CountDownLatch(1);
    Operations.mkdir(
        target,
        target,
        ApplicationProvider.getApplicationContext(),
        false,
        new AbstractErrorCallback() {
          @Override
          public void done(HybridFile hFile, boolean b) {
            completion.countDown();
          }
        });
    // Block until the async task reports back, then check the directory on disk.
    completion.await();
    assertTrue(newFolder.exists());
}
/**
 * Signs {@code data} with {@code key} using the named algorithm.
 *
 * @param algorithmName registered algorithm key in SIGN_FUNCTION_MAP
 * @param key           signing key, must not be null
 * @param data          payload to sign, must not be null
 * @return the computed signature
 * @throws NullPointerException          if key or data is null
 * @throws UnsupportedOperationException if the algorithm is not registered
 */
public static String sign(final String algorithmName, final String key, final String data) {
    if (key == null || data == null) {
        throw new NullPointerException("Key or data is null.");
    }
    return Optional.ofNullable(SIGN_FUNCTION_MAP.get(algorithmName))
            .orElseThrow(() -> new UnsupportedOperationException("unsupported sign algorithm:" + algorithmName))
            .sign(key, data);
}
/** MD5 signature of key "test" over "a1b2" must match the known digest. */
@Test
public void testGenerateMd5Sign() {
    final String expectedDigest = "7aa98f7d67f8e4730e2d1d3902295ce6";
    final String actualDigest = SignUtils.sign(SignUtils.SIGN_MD5, "test", "a1b2");
    assertThat(actualDigest, is(expectedDigest));
}
/** Copies every field from the builder; instances are created via {@code Builder.build()} only. */
private QueryStatisticsItem(Builder builder) {
    // Query identity.
    this.queryId = builder.queryId;
    this.customQueryId = builder.customQueryId;
    this.executionId = builder.executionId;
    // Session / origin information.
    this.user = builder.user;
    this.db = builder.db;
    this.connId = builder.connId;
    this.warehouseName = builder.warehouseName;
    // Statement and runtime details.
    this.sql = builder.sql;
    this.queryStartTime = builder.queryStartTime;
    this.fragmentInstanceInfos = builder.fragmentInstanceInfos;
    this.queryProfile = builder.queryProfile;
}
/** Builder values must round-trip through the corresponding getters. */
@Test
void testQueryStatisticsItem() {
    final QueryStatisticsItem item = new QueryStatisticsItem.Builder()
            .customQueryId("abc")
            .queryId("123")
            .warehouseName("wh1")
            .build();

    Assert.assertEquals("wh1", item.getWarehouseName());
    Assert.assertEquals("abc", item.getCustomQueryId());
    Assert.assertEquals("123", item.getQueryId());
}
/**
 * 31-based fold over every field that participates in equals():
 * credentials, resolved address, non-proxy-hosts flag, headers, type and timeout.
 */
@Override
public int hashCode() {
    int hash = 1;
    hash = 31 * hash + Objects.hashCode(username);
    hash = 31 * hash + Objects.hashCode(getPasswordValue());
    hash = 31 * hash + Objects.hashCode(getSocketAddress().get());
    hash = 31 * hash + Boolean.hashCode(getNonProxyHostsValue());
    hash = 31 * hash + Objects.hashCode(httpHeaders.get());
    hash = 31 * hash + Objects.hashCode(getType());
    hash = 31 * hash + Long.hashCode(connectTimeoutMillis);
    return hash;
}
@Test
void equalProxyProvidersNoAuth() {
    // Two providers built from the same address (no credentials) must be
    // equal AND have equal hash codes — the equals/hashCode contract.
    assertThat(createNoAuthProxy(ADDRESS_1)).isEqualTo(createNoAuthProxy(ADDRESS_1));
    assertThat(createNoAuthProxy(ADDRESS_1).hashCode()).isEqualTo(createNoAuthProxy(ADDRESS_1).hashCode());
}
/**
 * Resolves the descriptor for the given GRN via the provider registered
 * for its type.
 *
 * @throws IllegalStateException if no provider is registered for the GRN's type
 */
public GRNDescriptor getDescriptor(GRN grn) {
    final GRNDescriptorProvider provider = descriptorProviders.get(grn.grnType());
    if (provider != null) {
        return provider.get(grn);
    }
    throw new IllegalStateException("Missing GRN descriptor provider for GRN type: " + grn.type());
}
/** A registered provider's title must surface through the service. */
@Test
void getDescriptor() {
    // Provider map resolves the user's GRN type to a fixed title.
    final ImmutableMap<GRNType, GRNDescriptorProvider> providerMap = ImmutableMap.of(
            user.grnType(), grn -> GRNDescriptor.create(grn, "Jane Doe")
    );
    final GRNDescriptorService descriptorService = new GRNDescriptorService(providerMap);

    assertThat(descriptorService.getDescriptor(user)).satisfies(descriptor -> {
        assertThat(descriptor.grn()).isEqualTo(user);
        assertThat(descriptor.title()).isEqualTo("Jane Doe");
    });
}
@Override public void validateDeptList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得科室信息 Map<Long, DeptDO> deptMap = getDeptMap(ids); // 校验 ids.forEach(id -> { DeptDO dept = deptMap.get(id); if (dept == null) { throw exception(DEPT_NOT_FOUND); } if (!CommonStatusEnum.ENABLE.getStatus().equals(dept.getStatus())) { throw exception(DEPT_NOT_ENABLE, dept.getName()); } }); }
@Test public void testValidateDeptList_notFound() { // 准备参数 List<Long> ids = singletonList(randomLongId()); // 调用, 并断言异常 assertServiceException(() -> deptService.validateDeptList(ids), DEPT_NOT_FOUND); }
/**
 * Compares two strings for equality, treating a value wrapped in double
 * quotes as equal to its unquoted form (e.g. {@code "\"abc\""} matches
 * {@code "abc"}). Null-safe on both sides.
 *
 * @param left  left operand; may be null or double-quoted
 * @param right right operand; may be null or double-quoted
 * @return true when the unquoted values are equal
 */
public boolean match(String left, String right) {
    return Objects.equals(stripEnclosingQuotes(left), stripEnclosingQuotes(right));
}

/**
 * Removes one pair of enclosing double quotes, if present. Requires at least
 * two characters: previously a lone quote character satisfied both
 * startsWith and endsWith and made substring(1, 0) throw
 * StringIndexOutOfBoundsException.
 */
private static String stripEnclosingQuotes(String value) {
    if (value != null && value.length() >= 2 && value.startsWith("\"") && value.endsWith("\"")) {
        return value.substring(1, value.length() - 1);
    }
    return value;
}
/**
 * Values above 127 fall outside the Long autoboxing cache, so reference
 * comparison would fail here; match() must compare by value.
 */
@Test
public void longShouldEqualWhenLargerThan128() {
    Long left = 334L;
    Long right = 334L;
    assertTrue(new StringMatch().match(left, right));
}
/**
 * Deletes every path in the batch through the Box API — folders recursively
 * via the folders endpoint, plain files via the files endpoint. API failures
 * are translated into Cyberduck exceptions.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path file : files.keySet()) {
        try {
            if(file.isDirectory()) {
                // Third argument enables recursive folder deletion.
                new FoldersApi(new BoxApiClient(session.getClient())).deleteFoldersId(fileid.getFileId(file), null, true);
            }
            else {
                new FilesApi(new BoxApiClient(session.getClient())).deleteFilesId(fileid.getFileId(file), null);
            }
        }
        catch(ApiException e) {
            throw new BoxExceptionMappingService(fileid).map("Cannot delete {0}", e, file);
        }
    }
}
/** Deleting a path that never existed must surface as NotfoundException. */
@Test(expected = NotfoundException.class)
public void testDeleteNotFound() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final Path missing = new Path(new DefaultHomeFinderService(session).find(),
            UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new BoxDeleteFeature(session, fileid).delete(
            Collections.singletonMap(missing, new TransferStatus()),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Wraps the given byte array in an unpooled heap buffer without copying;
 * an empty array shares the EMPTY_BUFFER singleton instead of allocating.
 */
public static ByteBuf wrappedBuffer(byte[] array) {
    return array.length == 0
            ? EMPTY_BUFFER
            : new UnpooledHeapByteBuf(ALLOC, array, array.length);
}
/**
 * Exercises ByteBufUtil.equals across the length/offset/content matrix:
 * differing lengths, then same vs. differing content at same vs. shifted
 * reader offsets, for both short and long payloads. Each pair is released
 * immediately to keep the leak detector quiet.
 */
@Test
public void testEquals() {
    ByteBuf a, b;

    // Different length.
    a = wrappedBuffer(new byte[] { 1 });
    b = wrappedBuffer(new byte[] { 1, 2 });
    assertFalse(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Same content, same firstIndex, short length.
    a = wrappedBuffer(new byte[] { 1, 2, 3 });
    b = wrappedBuffer(new byte[] { 1, 2, 3 });
    assertTrue(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Same content, different firstIndex, short length.
    a = wrappedBuffer(new byte[] { 1, 2, 3 });
    b = wrappedBuffer(new byte[] { 0, 1, 2, 3, 4 }, 1, 3);
    assertTrue(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Different content, same firstIndex, short length.
    a = wrappedBuffer(new byte[] { 1, 2, 3 });
    b = wrappedBuffer(new byte[] { 1, 2, 4 });
    assertFalse(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Different content, different firstIndex, short length.
    a = wrappedBuffer(new byte[] { 1, 2, 3 });
    b = wrappedBuffer(new byte[] { 0, 1, 2, 4, 5 }, 1, 3);
    assertFalse(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Same content, same firstIndex, long length.
    a = wrappedBuffer(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    b = wrappedBuffer(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    assertTrue(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Same content, different firstIndex, long length.
    a = wrappedBuffer(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    b = wrappedBuffer(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 1, 10);
    assertTrue(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Different content, same firstIndex, long length.
    a = wrappedBuffer(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    b = wrappedBuffer(new byte[] { 1, 2, 3, 4, 6, 7, 8, 5, 9, 10 });
    assertFalse(ByteBufUtil.equals(a, b));
    a.release();
    b.release();

    // Different content, different firstIndex, long length.
    a = wrappedBuffer(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    b = wrappedBuffer(new byte[] { 0, 1, 2, 3, 4, 6, 7, 8, 5, 9, 10, 11 }, 1, 10);
    assertFalse(ByteBufUtil.equals(a, b));
    a.release();
    b.release();
}
/**
 * Writes a page, splitting it into chunks so that no chunk exceeds the
 * row-group row limit or (approximately) the logical-byte budget.
 *
 * @param page page to write; empty pages are ignored
 * @throws IOException if the underlying chunk write fails
 */
public void write(Page page)
        throws IOException
{
    requireNonNull(page, "page is null");
    checkState(!closed, "writer is closed");
    if (page.getPositionCount() == 0) {
        return;
    }

    checkArgument(page.getChannelCount() == columnWriters.size());

    // Consume the page front-to-back, one bounded chunk at a time.
    while (page != null) {
        // Start from the row-count cap ...
        int chunkRows = min(page.getPositionCount(), DEFAULT_ROW_GROUP_MAX_ROW_COUNT);
        Page chunk = page.getRegion(0, chunkRows);

        // ... then halve the chunk until its logical size fits the byte budget
        // (avoid chunk with huge logical size); never below a single row.
        while (chunkRows > 1 && chunk.getLogicalSizeInBytes() > chunkMaxLogicalBytes) {
            chunkRows /= 2;
            chunk = chunk.getRegion(0, chunkRows);
        }

        // Remove chunk from current page; null terminates the loop.
        if (chunkRows < page.getPositionCount()) {
            page = page.getRegion(chunkRows, page.getPositionCount() - chunkRows);
        }
        else {
            page = null;
        }

        writeChunk(chunk);
    }
}
/**
 * Stresses row-group flushing with tiny page/block/dictionary limits while
 * several column writers interleave dictionary fallbacks; the writer must
 * complete without throwing.
 */
@Test
public void testRowGroupFlushInterleavedColumnWriterFallbacks()
{
    temporaryDirectory = createTempDir();
    parquetFile = new File(temporaryDirectory, randomUUID().toString());
    List<Type> types = ImmutableList.of(BIGINT, INTEGER, VARCHAR, BOOLEAN);
    List<String> names = ImmutableList.of("col_1", "col_2", "col_3", "col_4");
    // Deliberately small limits to force frequent page/row-group flushes.
    ParquetWriterOptions parquetWriterOptions = ParquetWriterOptions.builder()
            .setMaxPageSize(DataSize.succinctBytes(1000))
            .setMaxBlockSize(DataSize.succinctBytes(15000))
            .setMaxDictionaryPageSize(DataSize.succinctBytes(1000))
            .build();

    try (ParquetWriter parquetWriter = createParquetWriter(parquetFile, types, names, parquetWriterOptions, CompressionCodecName.UNCOMPRESSED)) {
        Random rand = new Random();
        for (int pageIdx = 0; pageIdx < 10; pageIdx++) {
            int pageRowCount = 100;
            PageBuilder pageBuilder = new PageBuilder(pageRowCount, types);
            for (int rowIdx = 0; rowIdx < pageRowCount; rowIdx++) {
                // maintain col_1's dictionary size approximately half of raw data
                BIGINT.writeLong(pageBuilder.getBlockBuilder(0), pageIdx * 100 + rand.nextInt(50));
                // col_2/col_3 are high-cardinality so their dictionaries overflow quickly.
                INTEGER.writeLong(pageBuilder.getBlockBuilder(1), rand.nextInt(100000000));
                VARCHAR.writeString(pageBuilder.getBlockBuilder(2), UUID.randomUUID().toString());
                BOOLEAN.writeBoolean(pageBuilder.getBlockBuilder(3), rand.nextBoolean());
                pageBuilder.declarePosition();
            }
            parquetWriter.write(pageBuilder.build());
        }
    }
    catch (Exception e) {
        fail("Should not fail, but throw an exception as follows:", e);
    }
}
/**
 * Infers the default replication number: always 1 in shared-data (lake)
 * mode, otherwise min(3, total backends).
 *
 * @throws NoAliveBackendException when no backend is registered
 */
public static int calDefaultReplicationNum() throws UserException {
    // Shared-data mode keeps data in object storage; a single replica suffices.
    if (RunMode.isSharedDataMode()) {
        return 1;
    }
    int totalBackends =
            GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getTotalBackendNumber();
    if (totalBackends == 0) {
        throw new NoAliveBackendException("No alive backend");
    }
    return Math.min(3, totalBackends);
}
@Test
public void testCalDefaultReplicationNum() throws Exception {
    // Default (shared-nothing) run mode with no alive backends: expect failure.
    // NOTE(review): if no exception is thrown this first check silently passes;
    // consider adding Assert.fail() right after the call.
    try {
        AutoInferUtil.calDefaultReplicationNum();
    } catch (UserException e) {
        Assert.assertTrue(e instanceof NoAliveBackendException && e.getMessage().contains("No alive backend"));
    }
    // Mock the static run mode to shared-data; replication is then fixed at 1.
    new MockUp<RunMode>() {
        @Mock
        public RunMode getCurrentRunMode() {
            return RunMode.SHARED_DATA;
        }
    };
    Assert.assertEquals(1, AutoInferUtil.calDefaultReplicationNum());
}
/**
 * Computes the total length of an OSM way by summing segment distances
 * between consecutive node coordinates.
 *
 * @return the distance in the unit produced by distCalc, or Double.NaN when
 *         any node coordinate is unavailable
 * @throws IllegalStateException if elevation data is present for only some nodes
 */
private double calcDistance(ReaderWay way, WaySegmentParser.CoordinateSupplier coordinateSupplier) {
    LongArrayList nodes = way.getNodes();
    // every way has at least two nodes according to our acceptWay function
    GHPoint3D prevPoint = coordinateSupplier.getCoordinate(nodes.get(0));
    if (prevPoint == null)
        return Double.NaN;
    // Elevation presence is decided by the first point and must be consistent
    // for the whole way.
    boolean is3D = !Double.isNaN(prevPoint.ele);
    double distance = 0;
    for (int i = 1; i < nodes.size(); i++) {
        GHPoint3D point = coordinateSupplier.getCoordinate(nodes.get(i));
        if (point == null)
            return Double.NaN;
        // isNaN(ele) == is3D catches both mismatch directions: a 3D way
        // hitting a point without elevation, or a 2D way hitting one with it.
        if (Double.isNaN(point.ele) == is3D)
            throw new IllegalStateException("There should be elevation data for either all points or no points at all. OSM way: " + way.getId());
        distance += is3D
                ? distCalc.calcDist3D(prevPoint.lat, prevPoint.lon, prevPoint.ele, point.lat, point.lon, point.ele)
                : distCalc.calcDist(prevPoint.lat, prevPoint.lon, point.lat, point.lon);
        prevPoint = point;
    }
    return distance;
}
/**
 * Regression test for issue 2221: when a way's first node is missing from the
 * input, edge distances must still be derived from the available way geometry.
 * Asserts each edge distance matches the distance recomputed from its fetched
 * geometry, plus the three expected absolute distances.
 */
@Test
public void test_edgeDistanceWhenFirstNodeIsMissing_issue2221() { GraphHopper hopper = new GraphHopperFacade("test-osm10.xml").importOrLoad(); BaseGraph graph = hopper.getBaseGraph(); assertEquals(3, graph.getNodes()); assertEquals(3, graph.getEdges()); AllEdgesIterator iter = graph.getAllEdges(); while (iter.next()) { assertEquals(DistanceCalcEarth.DIST_EARTH.calcDistance(iter.fetchWayGeometry(FetchMode.ALL)), iter.getDistance(), 1.e-3); } assertEquals(35.612, graph.getEdgeIteratorState(0, Integer.MIN_VALUE).getDistance(), 1.e-3); assertEquals(75.256, graph.getEdgeIteratorState(1, Integer.MIN_VALUE).getDistance(), 1.e-3); assertEquals(143.332, graph.getEdgeIteratorState(2, Integer.MIN_VALUE).getDistance(), 1.e-3); }
/**
 * Returns whether proactive support (metrics reporting) is enabled.
 * A missing configuration ({@code properties == null}) counts as disabled.
 */
public boolean isProactiveSupportEnabled() {
    return properties != null && getMetricsEnabled();
}
@Test public void isProactiveSupportEnabledHTTPOnly() { // Given Properties serverProperties = new Properties(); serverProperties.setProperty( BaseSupportConfig.CONFLUENT_SUPPORT_METRICS_ENDPOINT_INSECURE_ENABLE_CONFIG, "true" ); BaseSupportConfig supportConfig = new TestSupportConfig(serverProperties); // When/Then assertTrue(supportConfig.isProactiveSupportEnabled()); }
/**
 * Returns an iterator over the character offsets at which each line of
 * {@code input} begins (the first offset is always 0).
 */
public static Iterator<Integer> lineOffsetIterator(String input) { return new LineOffsetIterator(input); }
/**
 * Checks line-start offsets for all three newline conventions (LF, CR, CRLF),
 * both with and without a trailing terminator. A trailing terminator yields an
 * extra offset pointing just past the end of the input.
 */
@Test
public void offsets() { assertThat(ImmutableList.copyOf(Newlines.lineOffsetIterator("foo\nbar\n"))) .containsExactly(0, 4, 8); assertThat(ImmutableList.copyOf(Newlines.lineOffsetIterator("foo\nbar"))).containsExactly(0, 4); assertThat(ImmutableList.copyOf(Newlines.lineOffsetIterator("foo\rbar\r"))) .containsExactly(0, 4, 8); assertThat(ImmutableList.copyOf(Newlines.lineOffsetIterator("foo\rbar"))).containsExactly(0, 4); assertThat(ImmutableList.copyOf(Newlines.lineOffsetIterator("foo\r\nbar\r\n"))) .containsExactly(0, 5, 10); assertThat(ImmutableList.copyOf(Newlines.lineOffsetIterator("foo\r\nbar"))) .containsExactly(0, 5); }
/**
 * SQL CAST from TINYINT to BIGINT. TINYINT values are already carried in a
 * Java {@code long} by the engine, so this widening cast is a no-op.
 */
@ScalarOperator(CAST) @SqlType(StandardTypes.BIGINT) public static long castToBigint(@SqlType(StandardTypes.TINYINT) long value) { return value; }
/**
 * Verifies that CAST(TINYINT AS BIGINT) preserves the numeric value.
 */
@Test
public void testCastToBigint() { assertFunction("cast(TINYINT'37' as bigint)", BIGINT, 37L); assertFunction("cast(TINYINT'17' as bigint)", BIGINT, 17L); }
/**
 * Scans a crash-report/game log against every known {@link Rule} pattern and
 * collects one {@link Result} per rule whose pattern occurs in the log.
 *
 * @param log the full log text to scan
 * @return the set of matched results (empty if no rule matched)
 */
public static Set<Result> analyze(String log) {
    Set<Result> results = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            results.add(new Result(rule, log, matcher));
        }
    }
    return results;
}

/**
 * @deprecated misspelled name kept for backward compatibility;
 *             use {@link #analyze(String)} instead.
 */
@Deprecated
public static Set<Result> anaylze(String log) {
    return analyze(log);
}
/**
 * The NEED_JDK11 rule must match the need_jdk112.txt crash report.
 */
@Test
public void needJDK112() throws IOException {
    CrashReportAnalyzer.Result result = findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/crash-report/need_jdk112.txt")),
            CrashReportAnalyzer.Rule.NEED_JDK11);
    // BUG FIX: the original test computed 'result' but never asserted on it,
    // so it could not fail. Throw directly to avoid depending on a particular
    // assertion library import.
    if (result == null) {
        throw new AssertionError("expected NEED_JDK11 rule to match need_jdk112.txt");
    }
}
/**
 * Returns {@code true} when {@code arg} is exactly equal to {@code pat}, or when
 * {@code arg} matches {@code pat} interpreted as a Perl5 regular expression.
 * The literal comparison is tried first, so no pattern compilation happens for
 * exact matches.
 */
public static boolean isEqualOrMatches(String arg, String pat, Perl5Matcher matcher, PatternCacheLRU cache) {
    if (arg.equals(pat)) {
        return true;
    }
    return matcher.matches(arg, cache.getPattern(pat, Perl5Compiler.READ_ONLY_MASK));
}
/**
 * Checks literal-vs-regex matching: identical strings match, case differences do
 * not, and "ht+p:" as a regex matches "http:" while the reverse direction (regex
 * "http:" against literal "ht+p:") does not.
 * NOTE(review): this calls a two-argument overload of isEqualOrMatches; confirm
 * it delegates to the four-argument (matcher, cache) variant.
 */
@Test
public void testisEqualOrMatches() throws Exception { assertTrue(HtmlParsingUtils.isEqualOrMatches("http:", "http:")); assertFalse(HtmlParsingUtils.isEqualOrMatches("http:", "htTp:")); assertTrue(HtmlParsingUtils.isEqualOrMatches("http:", "ht+p:")); assertFalse(HtmlParsingUtils.isEqualOrMatches("ht+p:", "http:")); }
/**
 * Creates the {@link AggregationUnit} implementation for the given aggregation
 * type, honoring DISTINCT where the type supports it.
 *
 * @param type aggregation type
 * @param isDistinct whether the aggregation applies to DISTINCT values
 * @return a fresh aggregation unit
 * @throws UnsupportedSQLOperationException for unsupported aggregation types
 */
public static AggregationUnit create(final AggregationType type, final boolean isDistinct) {
    if (AggregationType.MAX == type) {
        return new ComparableAggregationUnit(false);
    }
    if (AggregationType.MIN == type) {
        return new ComparableAggregationUnit(true);
    }
    if (AggregationType.SUM == type) {
        return isDistinct ? new DistinctSumAggregationUnit() : new AccumulationAggregationUnit();
    }
    if (AggregationType.COUNT == type) {
        return isDistinct ? new DistinctCountAggregationUnit() : new AccumulationAggregationUnit();
    }
    if (AggregationType.AVG == type) {
        return isDistinct ? new DistinctAverageAggregationUnit() : new AverageAggregationUnit();
    }
    if (AggregationType.BIT_XOR == type) {
        return new BitXorAggregationUnit();
    }
    throw new UnsupportedSQLOperationException(type.name());
}
/**
 * SUM with DISTINCT must produce a DistinctSumAggregationUnit.
 */
@Test
void assertCreateDistinctSumAggregationUnit() { assertThat(AggregationUnitFactory.create(AggregationType.SUM, true), instanceOf(DistinctSumAggregationUnit.class)); }
/**
 * Looks up the failed-task handler registered for the given distro data type.
 *
 * @param type distro data type key
 * @return the registered handler, or {@code null} if none is registered
 */
public DistroFailedTaskHandler findFailedTaskHandler(String type) { return failedTaskHandlerMap.get(type); }
/**
 * The holder must return the exact handler instance registered for the type.
 * NOTE(review): {@code type} and {@code distroFailedTaskHandler} are fixture
 * fields registered in test setup elsewhere in this class.
 */
@Test
void testFindFailedTaskHandler() { DistroFailedTaskHandler distroFailedTaskHandler = componentHolder.findFailedTaskHandler(type); assertEquals(this.distroFailedTaskHandler, distroFailedTaskHandler); }
/**
 * Returns the number of entries, delegating to the backing {@code items}
 * collection.
 */
@Override
public int size() { return items.size(); }
@Test public void testSize() throws Exception { //Checks size values throughout addition and removal for (int i = 0; i < 10; i++) { map.put(i, i); assertEquals("The map size is wrong.", i + 1, map.size()); } for (int i = 0; i < 10; i++) { map.remove(i); assertEquals("The map size is wrong.", 9 - i, map.size()); } }
/**
 * Returns the configured user-login attribute.
 *
 * @throws IllegalArgumentException if the attribute is not configured
 */
String getUserLogin() { return configuration.get(USER_LOGIN_ATTRIBUTE).orElseThrow(() -> new IllegalArgumentException("User login attribute is missing")); }
/**
 * getUserLogin() must fail with a descriptive IllegalArgumentException when the
 * login attribute is absent from the configuration.
 */
@Test
public void fail_to_get_user_login_attribute_when_null() { assertThatThrownBy(() -> underTest.getUserLogin()) .isInstanceOf(IllegalArgumentException.class) .hasMessage("User login attribute is missing"); }