focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Best-effort close of a websocket: writes a final JSON error frame, then closes
 * with the given code and a (truncated) reason. Any failure is logged and swallowed
 * so callers can always safely invoke this during teardown.
 */
static void closeSilently(
    final ServerWebSocket webSocket,
    final int code,
    final String message) {
  try {
    // Never serialize a null payload; fall back to an empty error string.
    final ImmutableMap<String, String> payload = ImmutableMap.of(
        "error",
        message == null ? "" : message
    );
    final String json = ApiJsonMapper.INSTANCE.get().writeValueAsString(payload);
    webSocket
        .writeFinalTextFrame(json, r -> { })
        .close((short) code, truncate(message));
  } catch (final Exception e) {
    LOG.info("Exception caught closing websocket", e);
  }
}
@Test
public void shouldTruncateMessageLongerThanCloseReasonAllows() throws Exception {
  // Given: a reason whose UTF-8 encoding exceeds the 123-byte close-reason limit.
  final String reason = "A long message that is longer than the maximum size that the "
      + "CloseReason class will allow-------------------------------------------------";
  assertThat("invalid test", reason.getBytes(UTF_8).length, greaterThan(123));

  // When:
  SessionUtil.closeSilently(websocket, INVALID_MESSAGE_TYPE.code(), reason);

  // Then: the close reason is truncated (ellipsis appended) to exactly 123 bytes.
  verify(websocket).writeFinalTextFrame(any(String.class), any(Handler.class));
  verify(websocket).close(codeCaptor.capture(), reasonCaptor.capture());
  assertThat(reasonCaptor.getValue(), is(
      "A long message that is longer than the maximum size that the CloseReason class "
          + "will allow-------------------------------..."));
  assertThat(reasonCaptor.getValue().getBytes(UTF_8).length, is(123));
}
/**
 * Fetches a cache's configuration from the cluster member owning the cache's partition.
 * Returns {@code null} when the member reports no configuration for the cache.
 */
static <K, V> CacheConfig<K, V> getCacheConfig(HazelcastClientInstanceImpl client,
                                               String cacheName,
                                               String simpleCacheName) {
    ClientMessage request = CacheGetConfigCodec.encodeRequest(cacheName, simpleCacheName);
    try {
        // Route the request to the partition that owns this cache name.
        int partitionId = client.getClientPartitionService().getPartitionId(cacheName);
        ClientInvocation invocation = new ClientInvocation(client, request, cacheName, partitionId);
        Future<ClientMessage> responseFuture = invocation.invoke();
        ClientMessage response = responseFuture.get();
        CacheConfigHolder holder = CacheGetConfigCodec.decodeResponse(response);
        if (holder == null) {
            return null;
        }
        return holder.asCacheConfig(client.getSerializationService());
    } catch (Exception e) {
        throw rethrow(e);
    }
}
@Test
public void testGetCacheConfig() {
    // No cache was ever created, so the lookup should return null, not throw.
    CacheConfig<String, String> cacheConfig = getCacheConfig(client, CACHE_NAME, CACHE_NAME);
    assertNull(cacheConfig);
}
/**
 * Resolves the client IP, preferring proxy headers over the socket address:
 * first entry of X-Forwarded-For, then X-Real-IP, finally the remote address.
 */
private static String getRemoteIp(final HttpServletRequest request) {
    final String xForwardedFor = request.getHeader(X_FORWARDED_FOR);
    if (StringUtils.isBlank(xForwardedFor)) {
        // No forwarding chain; fall back to X-Real-IP, then the raw remote address.
        final String realIp = request.getHeader(X_REAL_IP);
        return StringUtils.isBlank(realIp) ? request.getRemoteAddr() : realIp;
    }
    // The first entry in the chain is the originating client.
    return xForwardedFor.split(X_FORWARDED_FOR_SPLIT_SYMBOL)[0].trim();
}
@Test
public void testGetRemoteIp() {
    // Populate both proxy headers; X-Forwarded-For takes precedence in the resolver.
    this.httpServletRequest.addHeader(X_FORWARDED_FOR, "x-forwarded-for,test");
    this.httpServletRequest.addHeader(X_REAL_IP, "127.0.0.1");
    String xForwardedFor = httpServletRequest.getHeader(X_FORWARDED_FOR);
    assertNotNull(xForwardedFor);
    assertTrue(StringUtils.isNotBlank(xForwardedFor));
    assertEquals("x-forwarded-for,test", xForwardedFor);
    assertEquals("127.0.0.1", httpServletRequest.getHeader(X_REAL_IP));
}
public static void setOutputPath(Job job, Path outputDir) { try { outputDir = outputDir.getFileSystem(job.getConfiguration()).makeQualified( outputDir); } catch (IOException e) { // Throw the IOException as a RuntimeException to be compatible with MR1 throw new RuntimeException(e); } job.getConfiguration().set(FileOutputFormat.OUTDIR, outputDir.toString()); }
@Test
public void testSetOutputPathException() throws Exception {
    Job job = Job.getInstance();
    try {
        // Give it an invalid filesystem so it'll throw an exception
        FileOutputFormat.setOutputPath(job, new Path("foo:///bar"));
        fail("Should have thrown a RuntimeException with an IOException inside");
    } catch (RuntimeException re) {
        // The IOException must be preserved as the cause, not swallowed.
        assertTrue(re.getCause() instanceof IOException);
    }
}
@Override public int compare(Object o1, Object o2) { if (o1 == null && o2 == null) { return 0; // o1 == o2 } if (o1 == null) { return -1; // o1 < o2 } if (o2 == null) { return 1; // o1 > o2 } return nonNullCompare(o1, o2); }
@Test
public void nullArgTwo() {
    // A non-null value must compare greater than null.
    assertTrue("null two", cmp.compare(1, null) > 0);
}
// Moves a slot-sharing group from the pending set to the fulfilled set,
// recording the allocation that satisfied it.
void markFulfilled(ExecutionSlotSharingGroup group, AllocationID allocationId) {
    pendingRequests.remove(group);
    fulfilledRequests.put(group, allocationId);
}
@Test
void testMarkFulfilled() {
    SharingPhysicalSlotRequestBulk bulk = createBulk();
    AllocationID allocationId = new AllocationID();
    bulk.markFulfilled(SG1, allocationId);
    // SG1 leaves the pending set (RP2 from the other group remains) and its
    // allocation id shows up among the fulfilled requests.
    assertThat(bulk.getPendingRequests()).contains(RP2);
    assertThat(bulk.getAllocationIdsOfFulfilledRequests()).contains(allocationId);
}
/**
 * Builds the SMPP command requested by the exchange's COMMAND header,
 * bound to the given session and this binding's configuration.
 */
public SmppCommand createSmppCommand(SMPPSession session, Exchange exchange) {
    // Resolve the command type from the exchange, then instantiate the command.
    return SmppCommandType.fromExchange(exchange).createCommand(session, configuration);
}
@Test
public void createSmppCancelSmCommand() {
    SMPPSession session = new SMPPSession();
    Exchange exchange = new DefaultExchange(new DefaultCamelContext());
    // The COMMAND header selects which concrete SmppCommand is produced.
    exchange.getIn().setHeader(SmppConstants.COMMAND, "CancelSm");
    SmppCommand command = binding.createSmppCommand(session, exchange);
    assertTrue(command instanceof SmppCancelSmCommand);
}
// Deletes the given key from etcd via the KV client (fire-and-forget; the
// returned CompletableFuture from the client is intentionally not awaited).
public void delete(final String key) {
    client.getKVClient().delete(ByteSequence.from(key, StandardCharsets.UTF_8));
}
@Test
public void delete() {
    CompletableFuture<DeleteResponse> delete = mock(CompletableFuture.class);
    when(client.getKVClient().delete(ByteSequence.from(TEST_KEY, StandardCharsets.UTF_8))).thenReturn(delete);
    etcdClient.delete(TEST_KEY);
    // Exactly one delete call with the UTF-8 encoded key is expected.
    verify(client.getKVClient(), times(1)).delete(any(ByteSequence.class));
}
/**
 * Moves {@code file} to {@code renamed} using the Dropbox move_v2 endpoint.
 * If the target already exists it is deleted first so the relocation does not
 * fail with a conflict. Returns the target path enriched with the attributes
 * reported by the server for the moved entry.
 *
 * @throws BackgroundException wrapping any {@link DbxException} from the API
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status,
                 final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    try {
        if(status.isExists()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Delete file %s to be replaced with %s", renamed, file));
            }
            // Remove the existing target before moving over it.
            new DropboxDeleteFeature(session).delete(Collections.singletonMap(renamed, status), connectionCallback, callback);
        }
        final RelocationResult result = new DbxUserFilesRequests(session.getClient(file))
            .moveV2(containerService.getKey(file), containerService.getKey(renamed));
        return renamed.withAttributes(new DropboxAttributesFinderFeature(session).toAttributes(result.getMetadata()));
    }
    catch(DbxException e) {
        throw new DropboxExceptionMappingService().map("Cannot move {0}", e, file);
    }
}
@Test
public void testMoveNotFound() throws Exception {
    final DropboxMoveFeature feature = new DropboxMoveFeature(session);
    final Path home = new DefaultHomeFinderService(session).find();
    // A random, never-created file: moving it must map to NotfoundException.
    final Path test = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    assertThrows(NotfoundException.class,
        () -> feature.move(test, new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)),
            new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()));
}
/**
 * Marks each configuration property as secure or plain according to the SCM
 * plugin's registered metadata. If no metadata is registered for the plugin,
 * the configuration is left untouched.
 */
@PostConstruct
public void applyPluginMetadata() {
    String pluginId = getPluginId();
    // The metadata store is a singleton and its lookup is loop-invariant;
    // fetch and check it once instead of once per property.
    SCMMetadataStore scmMetadataStore = SCMMetadataStore.getInstance();
    if (scmMetadataStore.getConfigurationMetadata(pluginId) == null) {
        return; // no metadata registered for this plugin — nothing to apply
    }
    for (ConfigurationProperty configurationProperty : configuration) {
        boolean isSecureProperty = scmMetadataStore.hasOption(pluginId,
                configurationProperty.getConfigurationKey().getName(), SCMConfiguration.SECURE);
        configurationProperty.handleSecureValueConfiguration(isSecureProperty);
    }
}
@Test
void shouldMakeConfigurationSecureBasedOnMetadata() throws Exception {
    GoCipher goCipher = new GoCipher();

    //meta data of SCM
    SCMConfigurations scmConfiguration = new SCMConfigurations();
    scmConfiguration.add(new SCMConfiguration("key1").with(SECURE, true));
    scmConfiguration.add(new SCMConfiguration("key2").with(SECURE, false));
    SCMMetadataStore.getInstance().addMetadataFor("plugin-id", scmConfiguration, null);

    /*secure property is set based on metadata*/
    ConfigurationProperty secureProperty = new ConfigurationProperty(new ConfigurationKey("key1"), new ConfigurationValue("value1"), null, goCipher);
    ConfigurationProperty nonSecureProperty = new ConfigurationProperty(new ConfigurationKey("key2"), new ConfigurationValue("value2"), null, goCipher);
    SCM scm = SCMMother.create("scm-id", "scm-name", "plugin-id", "1.0", new Configuration(secureProperty, nonSecureProperty));
    scm.applyPluginMetadata();

    //assert SCM properties: key1 becomes encrypted, key2 stays plain text
    assertThat(secureProperty.isSecure()).isTrue();
    assertThat(secureProperty.getEncryptedConfigurationValue()).isNotNull();
    assertThat(secureProperty.getEncryptedValue()).isEqualTo(goCipher.encrypt("value1"));

    assertThat(nonSecureProperty.isSecure()).isFalse();
    assertThat(nonSecureProperty.getValue()).isEqualTo("value2");
}
// Starts the embedded database server in the configured data directory,
// creating the directory first if it does not exist. PATH_DATA is required;
// getRequiredSetting is expected to fail when it is missing.
@Override
public void start() {
    File dbHome = new File(getRequiredSetting(PATH_DATA.getKey()));
    if (!dbHome.exists()) {
        // NOTE(review): mkdirs() return value is ignored — a failed creation
        // would only surface later in startServer. Consider checking it.
        dbHome.mkdirs();
    }
    startServer(dbHome);
}
@Test
public void start_fails_with_IAE_if_property_Data_Path_is_not_set() {
    // Without the data-path property, start() must fail fast with a clear message.
    assertThatThrownBy(() -> underTest.start())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Missing property " + PATH_DATA.getKey());
}
// Validates the schema payload before delegating the compatibility check to the
// underlying service; malformed data is reported via a failed future rather
// than a thrown exception, keeping the async contract uniform.
@Override
public CompletableFuture<Boolean> isCompatible(String schemaId, SchemaData schema,
                                               SchemaCompatibilityStrategy strategy) {
    try {
        SchemaDataValidator.validateSchemaData(schema);
    } catch (InvalidSchemaDataException e) {
        return FutureUtil.failedFuture(e);
    }
    return service.isCompatible(schemaId, schema, strategy);
}
@Test
public void testIsCompatibleWithGoodSchemaData() {
    String schemaId = "test-schema-id";
    SchemaCompatibilityStrategy strategy = SchemaCompatibilityStrategy.FULL;
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    when(underlyingService.isCompatible(eq(schemaId), any(SchemaData.class), eq(strategy)))
        .thenReturn(future);

    // Valid schema data should be forwarded untouched to the underlying service.
    SchemaData schemaData = SchemaData.builder()
        .type(SchemaType.BOOLEAN)
        .data(new byte[0])
        .build();
    assertSame(future, service.isCompatible(schemaId, schemaData, strategy));
    verify(underlyingService, times(1))
        .isCompatible(eq(schemaId), same(schemaData), eq(strategy));
}
// Returns the configured access-expiry duration (may be null when unset).
@Override
public @Nullable Duration getExpiryForAccess() {
    return access;
}
@Test
public void access() {
    // Eternal policy never expires on access; temporal uses its configured duration.
    assertThat(eternal.getExpiryForAccess()).isEqualTo(Duration.ETERNAL);
    assertThat(temporal.getExpiryForAccess()).isEqualTo(Duration.ONE_MINUTE);
}
/**
 * Writes the list of detected slave servers as an XML document. Each server's
 * status is probed; servers that fail the probe are marked inactive (with a
 * last-inactive timestamp) before being serialized.
 */
public void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException {
    if ( isJettyMode() && !request.getContextPath().startsWith( CONTEXT_PATH ) ) {
        return; // not a request for this servlet's context
    }

    if ( log.isDebug() ) {
        logDebug( BaseMessages.getString( PKG, "GetStatusServlet.StatusRequested" ) );
    }
    response.setStatus( HttpServletResponse.SC_OK );

    // We always reply in XML...
    //
    response.setContentType( "text/xml" );
    response.setCharacterEncoding( Const.XML_ENCODING );
    PrintStream out = new PrintStream( response.getOutputStream() );

    out.print( XMLHandler.getXMLHeader( Const.XML_ENCODING ) );
    out.println( XMLHandler.openTag( XML_TAG_SLAVESERVER_DETECTIONS ) );

    if ( getDetections() != null ) {
        for ( SlaveServerDetection slaveServer : getDetections() ) {
            try {
                // Probe the server; any failure demotes it to inactive.
                slaveServer.getSlaveServer().getStatus();
            } catch ( Exception e ) {
                slaveServer.setActive( false );
                slaveServer.setLastInactiveDate( new Date() );
            }
            out.println( slaveServer.getXML() );
        }
    }

    out.println( XMLHandler.closeTag( XML_TAG_SLAVESERVER_DETECTIONS ) );
}
@Test
public void testUpdateActivityStatusInDoGet() throws Exception {
    LogChannelInterface log = mock( LogChannelInterface.class );
    ServletOutputStream outputStream = mock( ServletOutputStream.class );

    SlaveServerDetection activeServerDetection = mock( SlaveServerDetection.class );
    SlaveServerDetection inactiveServerDetection = mock( SlaveServerDetection.class );

    SlaveServer activeSlaveServer = mock( SlaveServer.class );
    SlaveServer inactiveSlaveServer = mock( SlaveServer.class );

    servlet.log = log;

    List<SlaveServerDetection> detections = new ArrayList<>();
    detections.add( activeServerDetection );
    detections.add( inactiveServerDetection );

    doReturn( false ).when( log ).isDebug();
    doReturn( outputStream ).when( response ).getOutputStream();
    doReturn( detections ).when( servlet ).getDetections();
    doReturn( activeSlaveServer ).when( activeServerDetection ).getSlaveServer();
    doReturn( inactiveSlaveServer ).when( inactiveServerDetection ).getSlaveServer();
    // Only the "inactive" server fails its status probe.
    doThrow( new Exception() ).when( inactiveSlaveServer ).getStatus();
    doCallRealMethod().when( servlet ).doGet( request, response );
    servlet.doGet( request, response );

    // Healthy server: probed, left untouched, still serialized.
    verify( activeSlaveServer ).getStatus();
    verify( activeServerDetection, never() ).setActive( false );
    verify( activeServerDetection, never() ).setLastInactiveDate( any() );
    verify( activeServerDetection ).getXML();

    // Failing server: probed, demoted to inactive with a timestamp, still serialized.
    verify( inactiveSlaveServer ).getStatus();
    verify( inactiveServerDetection ).setActive( false );
    verify( inactiveServerDetection ).setLastInactiveDate( any() );
    verify( inactiveServerDetection ).getXML();
}
// Convenience overload: parses a resource configuration string with no
// missing-resource default cap (Long.MAX_VALUE).
public static ConfigurableResource parseResourceConfigValue(String value)
        throws AllocationConfigurationException {
    return parseResourceConfigValue(value, Long.MAX_VALUE);
}
@Test
public void testParseNewStyleResourceMemoryNegativeWithMoreSpaces() throws Exception {
    // Negative memory must be rejected regardless of surrounding whitespace.
    expectNegativeValueOfResource("memory");
    parseResourceConfigValue("  vcores = 2 ,  memory-mb = -5120  ");
}
// Returns the number of keys in the database of the given cluster node by
// delegating the DBSIZE command to that node.
@Override
public Long dbSize(RedisClusterNode node) {
    return execute(node, RedisCommands.DBSIZE);
}
@Test
public void testDbSize() {
    RedisClusterNode master = getFirstMaster();
    // Fresh test instance: the master should hold no keys.
    Long size = connection.dbSize(master);
    assertThat(size).isZero();
}
/**
 * Parses a metric from its {@code "function[:field]"} string form, e.g.
 * {@code "count:stars"} or {@code "count"}. Blank input yields an empty
 * Optional; a missing field part yields a null field name.
 */
public static Optional<Metric> fromStringRepresentation(final String metricString) {
    if (StringUtils.isBlank(metricString)) {
        return Optional.empty();
    }
    final String[] parts = metricString.split(":");
    // The field is optional; any segments beyond the second are dropped.
    final String field = parts.length > 1 ? parts[1] : null;
    return Optional.of(new Metric(parts[0], field));
}
@Test
void parsesMetricsCorrectly() {
    // "function:field" strings split into the two Metric components.
    Optional<Metric> count = Metric.fromStringRepresentation("count:stars");
    assertThat(count)
        .hasValue(new Metric("count", "stars"));

    count = Metric.fromStringRepresentation("avg:salary");
    assertThat(count)
        .hasValue(new Metric("avg", "salary"));
}
/**
 * Reads the blob server port from the Flink configuration and rejects
 * non-positive values, since a fixed port is required in this deployment.
 */
public int getBlobServerPort() {
    final int port = KubernetesUtils.parsePort(flinkConfig, BlobServerOptions.PORT);
    checkArgument(port > 0, "%s should not be 0.", BlobServerOptions.PORT.key());
    return port;
}
@Test
void testGetBlobServerPortException2() {
    // A configured port of 0 must be rejected with an IllegalArgumentException.
    flinkConfig.set(BlobServerOptions.PORT, "0");

    assertThatThrownBy(
            () -> kubernetesJobManagerParameters.getBlobServerPort(),
            "Should fail with an exception.")
        .satisfies(
            cause ->
                assertThat(cause)
                    .isInstanceOf(IllegalArgumentException.class)
                    .hasMessageMatching(
                        BlobServerOptions.PORT.key() + " should not be 0."));
}
// Index names in this dialect are unique per table, not per schema/database.
@Override
public UniquenessLevel getIndexUniquenessLevel() {
    return UniquenessLevel.TABLE_LEVEL;
}
@Test
void assertGetIndexUniquenessLevel() {
    assertThat(uniquenessLevelProvider.getIndexUniquenessLevel(), is(UniquenessLevel.TABLE_LEVEL));
}
// Runs every known crash-report rule against the log and collects a Result for
// each rule whose pattern matches.
// NOTE(review): the name misspells "analyze" but is part of the public API;
// renaming would break existing callers.
public static Set<Result> anaylze(String log) {
    Set<Result> results = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            results.add(new Result(rule, log, matcher));
        }
    }
    return results;
}
@Test
public void jvm321() throws IOException {
    // The 32-bit JVM log must trigger the JVM_32BIT rule.
    CrashReportAnalyzer.Result result = findResultByRule(
        CrashReportAnalyzer.anaylze(loadLog("/logs/jvm_32bit2.txt")),
        CrashReportAnalyzer.Rule.JVM_32BIT);
}
/**
 * Collects an iterable of map entries into a new HashMap. A null or empty
 * iterable yields an empty map. Later entries overwrite earlier duplicates.
 */
public static <K, V> HashMap<K, V> toMap(Iterable<Entry<K, V>> entryIter) {
    final HashMap<K, V> result = new HashMap<>();
    if (isNotEmpty(entryIter)) {
        entryIter.forEach(entry -> result.put(entry.getKey(), entry.getValue()));
    }
    return result;
}
@Test
public void testToMap() {
    final Map<String, Car> expectedMap = new HashMap<>();
    final Car bmw = new Car("123", "bmw");
    expectedMap.put("123", bmw);
    final Car benz = new Car("456", "benz");
    expectedMap.put("456", benz);

    // Key is derived from each element via the extractor function.
    final Map<String, Car> testMap = IterUtil.toMap(Arrays.asList(bmw, benz), Car::getCarNumber);
    assertEquals(expectedMap, testMap);
}
/**
 * Handles DROP ... DELETE TOPIC statements: deletes the backing Kafka topic and
 * both key/value schema-registry subjects for the dropped source, then rewrites
 * the statement without the DELETE TOPIC clause so downstream execution only
 * performs the metadata drop. Non-drop statements pass through unchanged.
 */
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
  if (!(statement.getStatement() instanceof DropStatement)) {
    return statement; // not a drop — nothing to do
  }

  final DropStatement dropStatement = (DropStatement) statement.getStatement();
  if (!dropStatement.isDeleteTopic()) {
    return statement; // drop without DELETE TOPIC — nothing to clean up
  }

  final SourceName sourceName = dropStatement.getName();
  final DataSource source = metastore.getSource(sourceName);

  if (source != null) {
    if (source.isSource()) {
      throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
    }
    checkTopicRefs(source);

    deleteTopic(source);

    // Closer runs both subject deletions even if one fails, aggregating errors.
    final Closer closer = Closer.create();
    closer.register(() -> deleteKeySubject(source));
    closer.register(() -> deleteValueSubject(source));
    try {
      closer.close();
    } catch (final KsqlException e) {
      throw e;
    } catch (final Exception e) {
      throw new KsqlException(e);
    }
  } else if (!dropStatement.getIfExists()) {
    throw new KsqlException("Could not find source to delete topic for: " + statement);
  }

  // Rewrite the statement without the DELETE TOPIC clause.
  final T withoutDelete = (T) dropStatement.withoutDeleteClause();
  final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";

  return statement.withStatement(withoutDeleteText, withoutDelete);
}
@Test
public void shouldDoNothingForNonDropStatements() {
    // Given:
    final ConfiguredStatement<ListProperties> listProperties =
        givenStatement("LIST", new ListProperties(Optional.empty()));

    // When:
    final ConfiguredStatement<ListProperties> injected = deleteInjector.inject(listProperties);

    // Then: the exact same instance is returned — no rewriting for non-drops.
    assertThat(injected, is(sameInstance(listProperties)));
}
/**
 * Handles an OffsetCommit request: validates it against the group, refreshes the
 * classic-group session heartbeat where applicable, and produces one offset
 * commit record per valid partition plus a per-partition response. Partitions
 * with oversized metadata are rejected with OFFSET_METADATA_TOO_LARGE instead
 * of failing the whole request.
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    // Expiry derived from the request's retention time (may be absent).
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                // Reject only this partition; the rest of the request proceeds.
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());

                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));

                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );

                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
@Test
public void testGenericGroupOffsetDelete() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );
    context.commitOffset("foo", "bar", 0, 100L, 0);
    // An empty subscription means no topic is protected from deletion.
    group.setSubscribedTopics(Optional.of(Collections.emptySet()));
    context.testOffsetDeleteWith("foo", "bar", 0, Errors.NONE);
    assertFalse(context.hasOffset("foo", "bar", 0));
}
/**
 * Extracts mod/package keywords from a crash report's stack-trace section.
 * For each stack-trace line, collects the package segments of the failing
 * method (minus the class and method names, minus blacklisted common packages)
 * and, where a Forge-style module annotation is present, the module ids tagged
 * {@code xf:} (again minus blacklisted names).
 */
public static Set<String> findKeywordsFromCrashReport(String crashReport) {
    Matcher matcher = CRASH_REPORT_STACK_TRACE_PATTERN.matcher(crashReport);
    Set<String> result = new HashSet<>();
    if (matcher.find()) {
        for (String line : matcher.group("stacktrace").split("\\n")) {
            Matcher lineMatcher = STACK_TRACE_LINE_PATTERN.matcher(line);
            if (lineMatcher.find()) {
                String[] method = lineMatcher.group("method").split("\\.");
                // Last two segments are class and method name; keep only packages.
                for (int i = 0; i < method.length - 2; i++) {
                    if (PACKAGE_KEYWORD_BLACK_LIST.contains(method[i])) {
                        continue;
                    }
                    result.add(method[i]);
                }

                Matcher moduleMatcher = STACK_TRACE_LINE_MODULE_PATTERN.matcher(line);
                if (moduleMatcher.find()) {
                    for (String module : moduleMatcher.group("tokens").split(",")) {
                        String[] split = module.split(":");
                        // Only "xf:<module>" tokens identify a mod of interest.
                        if (split.length >= 2 && "xf".equals(split[0])) {
                            if (PACKAGE_KEYWORD_BLACK_LIST.contains(split[1])) {
                                continue;
                            }
                            result.add(split[1]);
                        }
                    }
                }
            }
        }
    }
    return result;
}
@Test
public void ic2() throws IOException {
    // The IC2 crash log should yield exactly the "ic2" keyword.
    assertEquals(
        Collections.singleton("ic2"),
        CrashReportAnalyzer.findKeywordsFromCrashReport(loadLog("/crash-report/mod/ic2.txt")));
}
// Convenience overload: sends the request with no per-call options/override.
@Override
public HttpResponse send(HttpRequest httpRequest) throws IOException {
    return send(httpRequest, null);
}
@Test
public void send_overridden_userAgent() throws IOException, InterruptedException {
    // Stub a simple 200 text response from the local mock server.
    String responseBody = "test response";
    mockWebServer.enqueue(
        new MockResponse()
            .setResponseCode(HttpStatus.OK.code())
            .setHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
            .setBody(responseBody));
    mockWebServer.start();

    // Configure the client with a CLI-level user-agent override.
    final String userAgentOverride = "User Agent In Override";
    HttpClientCliOptions cliOptions = new HttpClientCliOptions();
    cliOptions.userAgent = userAgentOverride;
    HttpClientConfigProperties configProperties = new HttpClientConfigProperties();
    cliOptions.trustAllCertificates = configProperties.trustAllCertificates = true;
    HttpClient httpClient =
        Guice.createInjector(
                new AbstractModule() {
                  @Override
                  protected void configure() {
                    install(new HttpClientModule.Builder().build());
                    bind(HttpClientCliOptions.class).toInstance(cliOptions);
                    bind(HttpClientConfigProperties.class).toInstance(configProperties);
                  }
                })
            .getInstance(HttpClient.class);

    HttpUrl baseUrl = mockWebServer.url("/");
    httpClient.send(get(baseUrl.toString()).withEmptyHeaders().build());

    // The outgoing request must carry the overridden User-Agent header.
    assertThat(mockWebServer.takeRequest().getHeader(USER_AGENT)).isEqualTo(userAgentOverride);
}
// A plugin supports an extension type iff the OSGi framework holds a GoPlugin
// reference for that plugin id and extension.
@Override
public boolean isPluginOfType(final String extension, String pluginId) {
    return goPluginOSGiFramework.hasReferenceFor(GoPlugin.class, pluginId, extension);
}
@Test
void shouldSayAPluginIsNotOfAnExtensionTypeWhenReferenceIsNotFound() {
    final String pluginThatDoesNotImplement = "plugin-that-does-not-implement";
    String extensionType = "extension-type";
    when(goPluginOSGiFramework.hasReferenceFor(GoPlugin.class, pluginThatDoesNotImplement, extensionType)).thenReturn(false);

    DefaultPluginManager pluginManager = new DefaultPluginManager(monitor, registry, goPluginOSGiFramework,
        jarChangeListener, pluginRequestProcessorRegistry, systemEnvironment, pluginLoader);
    boolean pluginIsOfExtensionType = pluginManager.isPluginOfType(extensionType, pluginThatDoesNotImplement);

    // Only the reference lookup happens; no plugin action is invoked.
    assertThat(pluginIsOfExtensionType).isFalse();
    verify(goPluginOSGiFramework).hasReferenceFor(GoPlugin.class, pluginThatDoesNotImplement, extensionType);
    verify(goPluginOSGiFramework, never()).doOn(eq(GoPlugin.class), eq(pluginThatDoesNotImplement), eq(extensionType), any(ActionWithReturn.class));
}
// Creates a PipelineOptions proxy implementing the given interface, with
// all values left at their defaults.
public static <T extends PipelineOptions> T as(Class<T> klass) {
    return new Builder().as(klass);
}
@Test
public void testSetterAnnotatedWithJsonIgnore() throws Exception {
    // @JsonIgnore on a setter is a configuration error and must be rejected.
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage(
        "Expected setter for property [value] to not be marked with @JsonIgnore on ["
            + "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$SetterWithJsonIgnore]");
    PipelineOptionsFactory.as(SetterWithJsonIgnore.class);
}
// Returns a new Frequency whose value is the sum of this and the argument.
// NOTE(review): plain long addition — very large operands could overflow
// silently; consider Math.addExact if that matters. TODO confirm intended.
public Frequency add(Frequency value) {
    return new Frequency(this.frequency + value.frequency);
}
@Test
public void testAdd() {
    // 100 MHz + 1 GHz = 1100 MHz, independent of constructing unit.
    Frequency low = Frequency.ofMHz(100);
    Frequency high = Frequency.ofGHz(1);
    Frequency expected = Frequency.ofMHz(1100);
    assertThat(low.add(high), is(expected));
}
/**
 * Persists the GIF resource to a file. If the drawable carries a real frame
 * transformation (anything other than the identity UnitTransformation) and
 * transformed encoding is enabled via options, frames are re-encoded;
 * otherwise the original GIF bytes are written directly.
 */
@Override
public boolean encode(
    @NonNull Resource<GifDrawable> resource, @NonNull File file, @NonNull Options options) {
  GifDrawable drawable = resource.get();
  boolean hasRealTransformation =
      !(drawable.getFrameTransformation() instanceof UnitTransformation);
  if (hasRealTransformation && options.get(ENCODE_TRANSFORMATION)) {
    return encodeTransformedToFile(drawable, file);
  }
  // No transformation to apply: write the untouched GIF data straight through.
  return writeDataDirect(drawable.getBuffer(), file);
}
@Test
public void testAdvancesDecoderBeforeAttemptingToGetFirstFrame() {
    when(gifEncoder.start(any(OutputStream.class))).thenReturn(true);
    when(decoder.getFrameCount()).thenReturn(1);
    when(decoder.getNextFrame()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));

    encoder.encode(resource, file, options);

    // advance() must be called before the first getNextFrame(), or the decoder
    // would hand back a stale/initial frame.
    InOrder order = inOrder(decoder);
    order.verify(decoder).advance();
    order.verify(decoder).getNextFrame();
}
@VisibleForTesting static String generateFile(Set<QualifiedVersion> versionsSet, String template) throws IOException { final Set<QualifiedVersion> versions = new TreeSet<>(reverseOrder()); // For the purpose of generating the index, snapshot versions are the same as released versions. Prerelease versions are not. versionsSet.stream().map(v -> v.isSnapshot() ? v.withoutQualifier() : v).forEach(versions::add); final List<String> includeVersions = versions.stream().map(QualifiedVersion::toString).collect(Collectors.toList()); final Map<String, Object> bindings = new HashMap<>(); bindings.put("versions", versions); bindings.put("includeVersions", includeVersions); return TemplateUtils.render(template, bindings); }
@Test
public void generateFile_rendersCorrectMarkup() throws Exception {
    // given: a mix of prerelease, release and snapshot versions.
    final Set<QualifiedVersion> versions = Stream.of(
        "8.0.0-alpha1", "8.0.0-beta2", "8.0.0-rc3", "8.0.0", "8.0.1", "8.0.2",
        "8.1.0", "8.1.1", "8.2.0-SNAPSHOT"
    ).map(QualifiedVersion::of).collect(Collectors.toSet());
    final String template = getResource("/templates/release-notes-index.asciidoc");
    final String expectedOutput = getResource(
        "/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.generateFile.asciidoc"
    );

    // when:
    final String actualOutput = ReleaseNotesIndexGenerator.generateFile(versions, template);

    // then: output matches the golden file byte for byte.
    assertThat(actualOutput, equalTo(expectedOutput));
}
/**
 * Registers schemas with the schema registry for CREATE ... AS SELECT and
 * CREATE SOURCE statements before execution. KsqlStatementExceptions pass
 * through untouched; other KsqlExceptions are re-wrapped with the statement
 * text for better error reporting. The schema-id config is stripped from the
 * session config afterwards so it does not leak into later statements.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  try {
    if (statement.getStatement() instanceof CreateAsSelect) {
      registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
    } else if (statement.getStatement() instanceof CreateSource) {
      registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
    }
  } catch (final KsqlStatementException e) {
    throw e;
  } catch (final KsqlException e) {
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
  // Remove schema id from SessionConfig
  return stripSchemaIdConfig(statement);
}
@SuppressWarnings("deprecation") // make sure deprecated method is not called @Test public void shouldNotReplaceExistingSchemaForSchemaRegistryEnabledFormatCreateSource() throws Exception { // Given: givenStatement("CREATE STREAM sink (f1 VARCHAR) WITH (kafka_topic='expectedName', key_format='AVRO', value_format='AVRO', partitions=1);"); doReturn(schemaMetadata).when(schemaRegistryClient).getLatestSchemaMetadata("expectedName-value"); doReturn(schemaMetadata).when(schemaRegistryClient).getLatestSchemaMetadata("expectedName-key"); // When: injector.inject(statement); // Then: verify(schemaRegistryClient, never()).register(any(), any(ParsedSchema.class)); verify(schemaRegistryClient, never()).register(any(), any(Schema.class)); }
/**
 * Allocates shared slots for the given execution attempts. The allocator works
 * in terms of execution vertices, so attempt ids are mapped to vertex ids for
 * allocation and mapped back for the returned assignments. Exactly one attempt
 * per vertex is supported; duplicates fail the checkState below.
 */
@Override
public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor(
        List<ExecutionAttemptID> executionAttemptIds) {

    final Map<ExecutionVertexID, ExecutionAttemptID> vertexIdToExecutionId = new HashMap<>();
    executionAttemptIds.forEach(
            executionId ->
                    vertexIdToExecutionId.put(executionId.getExecutionVertexId(), executionId));

    // A size mismatch means two attempts shared a vertex id (map collision).
    checkState(
            vertexIdToExecutionId.size() == executionAttemptIds.size(),
            "SlotSharingExecutionSlotAllocator does not support one execution vertex to have multiple concurrent executions");

    final List<ExecutionVertexID> vertexIds =
            executionAttemptIds.stream()
                    .map(ExecutionAttemptID::getExecutionVertexId)
                    .collect(Collectors.toList());

    // Allocate per vertex, then re-key each assignment by its attempt id.
    return allocateSlotsForVertices(vertexIds).stream()
            .collect(
                    Collectors.toMap(
                            vertexAssignment ->
                                    vertexIdToExecutionId.get(
                                            vertexAssignment.getExecutionVertexId()),
                            vertexAssignment ->
                                    new ExecutionSlotAssignment(
                                            vertexIdToExecutionId.get(
                                                    vertexAssignment.getExecutionVertexId()),
                                            vertexAssignment.getLogicalSlotFuture())));
}
@Test
void failLogicalSlotsIfPhysicalSlotIsFailed() {
    final TestingPhysicalSlotRequestBulkChecker bulkChecker =
        new TestingPhysicalSlotRequestBulkChecker();
    // Physical slot creation is rigged to fail for every request.
    AllocationContext context =
        AllocationContext.newBuilder()
            .addGroup(EV1, EV2)
            .withBulkChecker(bulkChecker)
            .withPhysicalSlotProvider(
                TestingPhysicalSlotProvider.createWithFailingPhysicalSlotCreation(
                    new FlinkException("test failure")))
            .build();

    final Map<ExecutionAttemptID, ExecutionSlotAssignment> allocatedSlots =
        context.allocateSlotsFor(EV1, EV2);

    // Every logical slot future must fail, nothing stays pending, and each
    // physical request must have been cancelled.
    for (ExecutionSlotAssignment allocatedSlot : allocatedSlots.values()) {
        assertThat(allocatedSlot.getLogicalSlotFuture()).isCompletedExceptionally();
    }
    assertThat(bulkChecker.getBulk().getPendingRequests()).isEmpty();
    final Set<SlotRequestId> requests = context.getSlotProvider().getRequests().keySet();
    assertThat(context.getSlotProvider().getCancellations().keySet()).isEqualTo(requests);
}
/**
 * Expands each buffer into a (header, payload) pair of ByteBuffers, preserving
 * input order. The returned array therefore has twice as many entries as the
 * input list.
 */
public static ByteBuffer[] generateBufferWithHeaders(
        List<Tuple2<Buffer, Integer>> bufferWithIndexes) {
    ByteBuffer[] bufferWithHeaders = new ByteBuffer[2 * bufferWithIndexes.size()];

    // Each buffer occupies two consecutive slots: header, then payload.
    int slot = 0;
    for (Tuple2<Buffer, Integer> bufferWithIndex : bufferWithIndexes) {
        setBufferWithHeader(bufferWithIndex.f0, bufferWithHeaders, slot);
        slot += 2;
    }
    return bufferWithHeaders;
}
@Test
void testGenerateBufferWithHeaders() {
    int bufferBytes = 5;
    Buffer originalBuffer = BufferBuilderTestUtils.buildSomeBuffer(bufferBytes);
    // Build the expected header for a buffer of the same size.
    ByteBuffer header = BufferReaderWriterUtil.allocatedHeaderBuffer();
    BufferReaderWriterUtil.setByteChannelBufferHeader(originalBuffer, header);

    ByteBuffer[] byteBuffers =
        TieredStorageUtils.generateBufferWithHeaders(
            Collections.singletonList(
                new Tuple2<>(
                    BufferBuilderTestUtils.buildSomeBuffer(bufferBytes), 0)));

    // One input buffer expands to exactly [header, payload].
    assertThat(byteBuffers).hasSize(2);
    assertThat(byteBuffers[0]).isEqualTo(header);
    assertThat(byteBuffers[1]).isEqualTo(originalBuffer.getNioBufferReadable());
}
// Checks table existence via the metastore RPC, timing the call under the
// "HMS.tableExists" tracer scope (the Timer is closed automatically).
public boolean tableExists(String dbName, String tableName) {
    try (Timer ignored = Tracers.watchScope(EXTERNAL, "HMS.tableExists")) {
        return callRPC("tableExists",
                String.format("Failed to get table exists [%s.%s]", dbName, tableName),
                dbName, tableName);
    }
}
@Test
public void testTableExists(@Mocked HiveMetaStoreClient metaStoreClient) throws TException {
    // Mock the underlying thrift client to report the table as present.
    new Expectations() {
        {
            metaStoreClient.tableExists("hive_db", "hive_table");
            result = true;
        }
    };

    HiveConf hiveConf = new HiveConf();
    hiveConf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), "thrift://127.0.0.1:90300");
    HiveMetaClient client = new HiveMetaClient(hiveConf);
    Assert.assertTrue(client.tableExists("hive_db", "hive_table"));
}
// Delegates trace-level logging straight to the wrapped SLF4J logger.
@Override
public void trace(String msg) {
    logger.trace(msg);
}
// The dashboard logger must forward the format string and both arguments
// unchanged to the underlying SLF4J logger.
@Test
void testTraceWithFormat2() {
    jobRunrDashboardLogger.trace("trace with {} {}", "format1", "format2");
    verify(slfLogger).trace("trace with {} {}", "format1", "format2");
}
/**
 * Structural equality of two flat-record object nodes: two nulls are equal, one
 * null is not; otherwise the schema names must match, after which the common
 * schema is extracted and the nodes compared.
 */
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
    if (left == null) {
        return right == null;
    }
    if (right == null) {
        return false;
    }
    String leftSchemaName = left.getSchema().getName();
    String rightSchemaName = right.getSchema().getName();
    if (!leftSchemaName.equals(rightSchemaName)) {
        return false;
    }
    // Required side effect before compare(): aligns both nodes on their shared schema.
    extractCommonObjectSchema(left, right);
    return compare(left, right);
}
// Two movies whose tag maps differ in one value must compare as not equal,
// and the result must be symmetric in the argument order.
@Test
public void differentMap() {
    SimpleHollowDataset dataset = SimpleHollowDataset.fromClassDefinitions(Movie.class);
    FakeHollowSchemaIdentifierMapper idMapper = new FakeHollowSchemaIdentifierMapper(dataset);
    HollowObjectMapper objMapper = new HollowObjectMapper(HollowWriteStateCreator.createWithSchemas(dataset.getSchemas()));
    FlatRecordWriter flatRecordWriter = new FlatRecordWriter(dataset, idMapper);
    Movie movie1 = new Movie();
    movie1.tags = new HashMap<>();
    movie1.tags.put(new Tag("Type"), new TagValue("Movie"));
    movie1.tags.put(new Tag("Genre"), new TagValue("action"));
    Movie movie2 = new Movie();
    movie2.tags = new HashMap<>();
    movie2.tags.put(new Tag("Type"), new TagValue("Movie"));
    // Only the Genre value differs between the two movies.
    movie2.tags.put(new Tag("Genre"), new TagValue("comedy"));
    flatRecordWriter.reset();
    objMapper.writeFlat(movie1, flatRecordWriter);
    FlatRecord flatRecord1 = flatRecordWriter.generateFlatRecord();
    flatRecordWriter.reset();
    objMapper.writeFlat(movie2, flatRecordWriter);
    FlatRecord flatRecord2 = flatRecordWriter.generateFlatRecord();
    // Check both argument orders to cover symmetry.
    Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(flatRecord1), new FlatRecordTraversalObjectNode(flatRecord2))).isFalse();
    Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(flatRecord2), new FlatRecordTraversalObjectNode(flatRecord1))).isFalse();
}
/**
 * Opens a (possibly ranged) download stream for the given file. Resolves the
 * resource id, fetches the download URI from the listing API, and issues an
 * HTTP GET; for resumed transfers a Range header is added and compression is
 * disabled so byte offsets stay exact.
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final String resourceId = fileid.getFileId(file);
        // Request only the download option to keep the metadata call cheap.
        final UiFsModel uiFsModel = new ListResourceApi(new EueApiClient(session)).resourceResourceIdGet(resourceId,
                null, null, null, null, null, null, Collections.singletonList(EueAttributesFinderFeature.OPTION_DOWNLOAD), null);
        final HttpUriRequest request = new HttpGet(uiFsModel.getUilink().getDownloadURI());
        if(status.isAppend()) {
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                // Open-ended range when the total length is unknown.
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
            // Disable compression
            request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
        }
        final HttpResponse response = session.getClient().execute(request);
        switch(response.getStatusLine().getStatusCode()) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_PARTIAL_CONTENT:
                // The returned stream releases the HTTP connection when closed.
                return new HttpMethodReleaseInputStream(response);
            default:
                throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
                        response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
        }
    }
    catch(ApiException e) {
        throw new EueExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Round-trip integration test: upload random bytes, verify presence and size,
// read them back through EueReadFeature, compare, then delete the container.
@Test
public void testRead() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path container = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
    final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final byte[] content = RandomUtils.nextBytes(5423);
    createFile(fileid, file, content);
    assertTrue(new EueFindFeature(session, fileid).find(file));
    final PathAttributes attributes = new EueAttributesFinderFeature(session, fileid).find(file);
    assertEquals(content.length, attributes.getSize());
    final byte[] compare = new byte[content.length];
    final InputStream stream = new EueReadFeature(session, fileid).read(file, new TransferStatus(), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    // Cleanup: deleting the container removes the file as well.
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Copies the user config definitions contained in {@code bundle} into this
 * directory, checking them against definitions from previously added bundles.
 *
 * @throws IllegalArgumentException if the definitions cannot be read or copied
 */
public void addConfigDefinitionsFromBundle(Bundle bundle, List<Bundle> bundlesAdded) {
    try {
        checkAndCopyUserDefs(bundle, bundlesAdded);
    } catch (IOException e) {
        String bundlePath = bundle.getFile().getAbsolutePath();
        throw new IllegalArgumentException(
                "Unable to add config definitions from bundle " + bundlePath, e);
    }
}
@Test public void require_that_conflicting_defs_are_not_added() throws IOException { File defDir = temporaryFolder.newFolder(); IOUtils.writeFile(new File(defDir, "foo.def"), "alreadyexists", false); ConfigDefinitionDir dir = new ConfigDefinitionDir(defDir); Bundle bundle = new Bundle(new JarFile(bundleFile), bundleFile); ArrayList<Bundle> bundlesAdded = new ArrayList<>(); // Conflict with built-in config definition try { dir.addConfigDefinitionsFromBundle(bundle, bundlesAdded); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains ("The config definition with name 'bar.foo' contained in the bundle '" + bundleFileName + "' conflicts with a built-in config definition")); } bundlesAdded.add(bundle); // Conflict with another bundle Bundle bundle2 = new Bundle(new JarFile(bundleFile), bundleFile); try { dir.addConfigDefinitionsFromBundle(bundle2, bundlesAdded); } catch (IllegalArgumentException e) { assertEquals("The config definition with name 'bar.foo' contained in the bundle '" + bundleFileName + "' conflicts with the same config definition in the bundle 'com.yahoo.searcher1.jar'. Please choose a different name.", e.getMessage()); } }
/** Builds the full ZooKeeper node path for the given metadata identifier. */
String getNodePath(BaseMetadataIdentifier metadataIdentifier) {
    String root = toRootDir();
    String uniqueKey = metadataIdentifier.getUniqueKey(KeyTypeEnum.PATH);
    return root + uniqueKey;
}
// Stores provider metadata in ZooKeeper, verifies the znode content appears,
// disappears after delete, and reappears (carrying the expected parameter)
// after re-storing. waitSeconds polls because writes are asynchronous.
@Test
void testStoreProvider() throws ClassNotFoundException, InterruptedException {
    String interfaceName = "org.apache.dubbo.metadata.store.zookeeper.ZookeeperMetadataReport4TstService";
    String version = "1.0.0.zk.md";
    String group = null;
    String application = "vic.zk.md";
    MetadataIdentifier providerMetadataIdentifier =
            storePrivider(zookeeperMetadataReport, interfaceName, version, group, application);
    String fileContent = zookeeperMetadataReport.zkClient.getContent(
            zookeeperMetadataReport.getNodePath(providerMetadataIdentifier));
    fileContent = waitSeconds(fileContent, 3500, zookeeperMetadataReport.getNodePath(providerMetadataIdentifier));
    Assertions.assertNotNull(fileContent);
    // After deletion the node content must be gone.
    deletePath(providerMetadataIdentifier, zookeeperMetadataReport);
    fileContent = zookeeperMetadataReport.zkClient.getContent(
            zookeeperMetadataReport.getNodePath(providerMetadataIdentifier));
    fileContent = waitSeconds(fileContent, 1000, zookeeperMetadataReport.getNodePath(providerMetadataIdentifier));
    Assertions.assertNull(fileContent);
    // Re-storing must restore the content, including the stored parameter.
    providerMetadataIdentifier = storePrivider(zookeeperMetadataReport, interfaceName, version, group, application);
    fileContent = zookeeperMetadataReport.zkClient.getContent(
            zookeeperMetadataReport.getNodePath(providerMetadataIdentifier));
    fileContent = waitSeconds(fileContent, 3500, zookeeperMetadataReport.getNodePath(providerMetadataIdentifier));
    Assertions.assertNotNull(fileContent);
    FullServiceDefinition fullServiceDefinition = JsonUtils.toJavaObject(fileContent, FullServiceDefinition.class);
    Assertions.assertEquals(fullServiceDefinition.getParameters().get("paramTest"), "zkTest");
}
// Convenience overload: retries using the class-level default attempt count
// and minimum delay.
public static void executeWithRetry(RetryFunction function) throws Exception {
    executeWithRetry(maxAttempts, minDelay, function);
}
// A function that succeeds immediately needs no retry; the side-effect message
// proves it actually ran.
@Test
public void retryHealthyFunction() throws Exception {
    executeWithRetry(IOITHelperTest::validFunction);
    assertEquals("The healthy function.", message);
}
// Serializes as "<database><DELIMITER><group><DELIMITER><dataSource>"; the
// format must stay parseable by the String-based constructor.
@Override
public String toString() {
    return String.join(DELIMITER, databaseName, groupName, dataSourceName);
}
// Parses "db.group.ds" into its three components and checks the toString()
// round-trip reproduces the original text.
@Test
void assertNew() {
    QualifiedDataSource actual = new QualifiedDataSource("test_db.test_group_name.test_ds");
    assertThat(actual.getDatabaseName(), is("test_db"));
    assertThat(actual.getGroupName(), is("test_group_name"));
    assertThat(actual.getDataSourceName(), is("test_ds"));
    assertThat(actual.toString(), is("test_db.test_group_name.test_ds"));
}
/**
 * Upgrades a node configuration from an older version. Only version 0 needs a
 * rewrite; any other version is returned unchanged with a {@code false} flag.
 */
@Override
public TbPair<Boolean, JsonNode> upgrade(int fromVersion, JsonNode oldConfiguration) throws TbNodeException {
    if (fromVersion == 0) {
        return upgradeToUseFetchToAndDataToFetch(oldConfiguration);
    }
    return new TbPair<>(false, oldConfiguration);
}
// Upgrading a version-0 config must report success (true) and yield exactly
// the node's current default configuration.
@Test
public void givenOldConfig_whenUpgrade_thenShouldReturnTrueResultWithNewConfig() throws Exception {
    var defaultConfig = new TbGetRelatedDataNodeConfiguration().defaultConfiguration();
    var node = new TbGetRelatedAttributeNode();
    // Legacy (v0) shape: attrMapping/telemetry fields instead of dataToFetch.
    String oldConfig = "{\"attrMapping\":{\"serialNumber\":\"sn\"},"
            + "\"relationsQuery\":{\"direction\":\"FROM\",\"maxLevel\":1,"
            + "\"filters\":[{\"relationType\":\"Contains\",\"entityTypes\":[]}],"
            + "\"fetchLastLevelOnly\":false},"
            + "\"telemetry\":false}";
    JsonNode configJson = JacksonUtil.toJsonNode(oldConfig);
    TbPair<Boolean, JsonNode> upgrade = node.upgrade(0, configJson);
    Assertions.assertTrue(upgrade.getFirst());
    Assertions.assertEquals(defaultConfig, JacksonUtil.treeToValue(upgrade.getSecond(), defaultConfig.getClass()));
}
// Exposes the static properties holder; null until initialized elsewhere.
public static Properties getProperties() {
    return properties;
}
// Before startup parses a command line, the static properties holder is null.
@Test
public void testGetProperties() {
    Properties properties = NamesrvStartup.getProperties();
    Assert.assertNull(properties);
}
// Removes the key from the wrapped state, then records the removal in the
// changelog (keyed by serialized key and current namespace) so it can be
// replayed on recovery.
@Override
public void remove(UK key) throws Exception {
    delegatedState.remove(key);
    changeLogger.valueElementRemoved(out -> serializeKey(key, out), getCurrentNamespace());
}
// Removing an existing key must be recorded as an element removal in the
// changelog.
@Test
public void testRemoveRecorded() throws Exception {
    testRecorded(
            singletonMap("x", "y"),
            state -> state.remove("x"),
            logger -> assertTrue(logger.stateElementRemoved));
}
/**
 * Parses the command-line arguments into a {@link DistCpOptions} instance.
 *
 * @param args the raw command-line arguments passed to the DistCp tool
 * @return the fully built options object
 * @throws IllegalArgumentException if the arguments cannot be parsed or a
 *         numeric option value is invalid
 */
public static DistCpOptions parse(String[] args)
    throws IllegalArgumentException {
  CommandLineParser parser = new CustomParser();
  CommandLine command;
  try {
    command = parser.parse(cliOptions, args, true);
  } catch (ParseException e) {
    throw new IllegalArgumentException("Unable to parse arguments. "
        + Arrays.toString(args), e);
  }

  DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);
  // Boolean switches map one-to-one onto builder flags; BLOCKING is
  // deliberately inverted (the switch requests non-blocking mode).
  builder
      .withAtomicCommit(
          command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
      .withSyncFolder(
          command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
      .withDeleteMissing(
          command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
      .withIgnoreFailures(
          command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
      .withOverwrite(
          command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
      .withAppend(
          command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
      .withSkipCRC(
          command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
      .withBlocking(
          !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
      .withVerboseLog(
          command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
      .withDirectWrite(
          command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
      .withUseIterator(
          command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
      .withUpdateRoot(
          command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));

  if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.DIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseDiff(snapshots[0], snapshots[1]);
  }
  if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.RDIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseRdiff(snapshots[0], snapshots[1]);
  }

  if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
    builder.withFiltersFile(
        getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
    builder.withLogPath(
        new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
    final String workPath = getVal(command,
        DistCpOptionSwitch.WORK_PATH.getSwitch());
    if (workPath != null && !workPath.isEmpty()) {
      builder.withAtomicWorkPath(new Path(workPath));
    }
  }
  if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
    builder.withTrackMissing(
        new Path(getVal(
            command,
            DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
    try {
      // Primitive float avoids needless autoboxing.
      final float mapBandwidth = Float.parseFloat(
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
      builder.withMapBandwidth(mapBandwidth);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Bandwidth specified is invalid: " +
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
    }
  }

  if (command.hasOption(
      DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
    try {
      final int numThreads = Integer.parseInt(getVal(command,
          DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
      builder.withNumListstatusThreads(numThreads);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(
          "Number of liststatus threads is invalid: " + getVal(command,
              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
    try {
      final int maps = Integer.parseInt(
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
      builder.maxMaps(maps);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Number of maps is invalid: " +
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
    builder.withCopyStrategy(
        getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
    builder.preserve(
        getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
        " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
        " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
    // Fix: trim the option VALUE, not the switch-name constant. The old code
    // called trim() on the constant switch string (a no-op) and left the
    // user-supplied value untrimmed.
    final String chunkSizeStr = getVal(command,
        DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch()).trim();
    try {
      int csize = Integer.parseInt(chunkSizeStr);
      // Negative values are clamped to 0 (chunking disabled).
      csize = csize > 0 ? csize : 0;
      LOG.info("Set distcp blocksPerChunk to " + csize);
      builder.withBlocksPerChunk(csize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("blocksPerChunk is invalid: "
          + chunkSizeStr, e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
    // Same trim-placement fix as for blocksPerChunk above.
    final String copyBufferSizeStr = getVal(command,
        DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch()).trim();
    try {
      int copyBufferSize = Integer.parseInt(copyBufferSizeStr);
      builder.withCopyBufferSize(copyBufferSize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("copyBufferSize is invalid: "
          + copyBufferSizeStr, e);
    }
  }

  return builder.build();
}
// Exhaustively checks parsing of the -p (preserve) switch: absent, bare -p,
// explicit attribute letters in several combinations, the invalid-letter case,
// and the Builder API's preserve() idempotence.
@Test
public void testPreserve() {
  // No -p at all: nothing is preserved.
  DistCpOptions options = OptionsParser.parse(new String[] {
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));

  // Bare -p (with -f): the default attribute set, excluding ACL and XATTR.
  options = OptionsParser.parse(new String[] {
      "-p",
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // Bare -p without -f behaves the same.
  options = OptionsParser.parse(new String[] {
      "-p",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // Explicit letters: only blocksize (b) and replication (r).
  options = OptionsParser.parse(new String[] {
      "-pbr",
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // b, r, g, u, p — still no checksum/ACL/XATTR.
  options = OptionsParser.parse(new String[] {
      "-pbrgup",
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // All letters including ACL (a), XATTR (x) and times (t).
  options = OptionsParser.parse(new String[] {
      "-pbrgupcaxt",
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.XATTR));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.TIMES));

  // Checksum type alone.
  options = OptionsParser.parse(new String[] {
      "-pc",
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // Bare -p yields the documented default set (all defaults minus two).
  options = OptionsParser.parse(new String[] {
      "-p",
      "-f",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2,
      options.getPreserveAttributes().size());

  // Unknown attribute letters must be rejected.
  try {
    OptionsParser.parse(new String[] {
        "-pabcd",
        "-f",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target"});
    Assert.fail("Invalid preserve attribute");
  } catch (NoSuchElementException ignore) {}

  // Builder API: preserve() is additive and idempotent.
  Builder builder = new DistCpOptions.Builder(
      new Path("hdfs://localhost:8020/source/first"),
      new Path("hdfs://localhost:8020/target/"));
  Assert.assertFalse(
      builder.build().shouldPreserve(FileAttribute.PERMISSION));
  builder.preserve(FileAttribute.PERMISSION);
  Assert.assertTrue(builder.build().shouldPreserve(FileAttribute.PERMISSION));
  builder.preserve(FileAttribute.PERMISSION);
  Assert.assertTrue(builder.build().shouldPreserve(FileAttribute.PERMISSION));
}
// Looks up (or lazily creates) the state cell for the namespace/address pair,
// passing the null StateContext since no context is available at this call site.
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
// isEmpty() on combining state must trigger a Windmill bag fetch for the
// combining key and report false once a non-empty accumulator arrives.
@Test
@SuppressWarnings("ArraysAsListPrimitiveArray")
public void testCombiningIsEmpty() throws Exception {
    GroupingState<Integer, Integer> value = underTest.state(NAMESPACE, COMBINING_ADDR);
    SettableFuture<Iterable<int[]>> future = SettableFuture.create();
    when(mockReader.bagFuture(eq(COMBINING_KEY), eq(STATE_FAMILY), Mockito.<Coder<int[]>>any()))
            .thenReturn(future);
    ReadableState<Boolean> result = value.isEmpty().readLater();
    ArgumentCaptor<ByteString> byteString = ArgumentCaptor.forClass(ByteString.class);
    // Note that we do expect the third argument - the coder - to be equal to accumCoder, but that
    // is possibly overspecified and currently trips an issue in the SDK where identical coders are
    // not #equals().
    //
    // What matters is that a future is created, hence a Windmill RPC sent.
    Mockito.verify(mockReader)
            .bagFuture(byteString.capture(), eq(STATE_FAMILY), Mockito.<Coder<int[]>>any());
    assertThat(byteString.getValue(), byteStringEq(COMBINING_KEY));
    // Complete the future asynchronously; read() must then observe non-empty.
    waitAndSet(future, Collections.singletonList(new int[] {29}), 200);
    assertThat(result.read(), Matchers.is(false));
}
/**
 * Loads configuration from the ".bak" backup file next to the primary config.
 *
 * @return {@code true} if the backup was decoded, or if it is missing/empty
 *         (nothing to restore is not an error); {@code false} only when
 *         reading or decoding failed with an exception.
 */
private boolean loadBak() {
    String fileName = null;
    try {
        fileName = this.configFilePath() + ".bak";
        String jsonString = MixAll.file2String(fileName);
        // isEmpty() reads clearer than length() > 0 for the has-content check.
        if (jsonString != null && !jsonString.isEmpty()) {
            this.decode(jsonString);
            log.info("load " + fileName + " OK");
            return true;
        }
    } catch (Exception e) {
        log.error("load " + fileName + " Failed", e);
        return false;
    }
    // Missing or empty backup: succeed without loading anything.
    return true;
}
// Exercises the private loadBak() reflectively, both with an existing backup
// file and after deleting it; both cases must report success.
@Test
public void testLoadBak() throws Exception {
    ConfigManager testConfigManager = buildTestConfigManager();
    File file = createAndWriteFile(testConfigManager.configFilePath() + ".bak");
    // invoke private method "loadBak()"
    Method declaredMethod = ConfigManager.class.getDeclaredMethod("loadBak");
    declaredMethod.setAccessible(true);
    Boolean loadBakResult = (Boolean) declaredMethod.invoke(testConfigManager);
    assertTrue(loadBakResult);
    file.delete();
    // A missing backup file is not an error: still returns true.
    Boolean loadBakResult2 = (Boolean) declaredMethod.invoke(testConfigManager);
    assertTrue(loadBakResult2);
    declaredMethod.setAccessible(false);
}
// Returns an immutable snapshot of the per-PCollection field-access map so
// callers cannot mutate the visitor's internal state.
ImmutableMap<PCollection<?>, FieldAccessDescriptor> getPCollectionFieldAccess() {
    return ImmutableMap.copyOf(pCollectionFieldAccess);
}
// Two consumers of the same PCollection access different fields; the visitor
// must union the accessed field sets rather than keep only one.
@Test
public void testFieldAccessTwoKnownMainInputs() {
    Pipeline p = Pipeline.create();
    FieldAccessVisitor fieldAccessVisitor = new FieldAccessVisitor();
    Schema schema =
            Schema.of(
                    Field.of("field1", FieldType.STRING),
                    Field.of("field2", FieldType.STRING),
                    Field.of("field3", FieldType.STRING));
    PCollection<Row> source =
            p.apply(Create.of(Row.withSchema(schema).addValues("foo", "bar", "baz").build()))
                    .setRowSchema(schema);
    source.apply(new FieldAccessTransform(FieldAccessDescriptor.withFieldNames("field1")));
    source.apply(new FieldAccessTransform(FieldAccessDescriptor.withFieldNames("field2")));
    p.traverseTopologically(fieldAccessVisitor);
    FieldAccessDescriptor fieldAccess = fieldAccessVisitor.getPCollectionFieldAccess().get(source);
    // field3 was never accessed, so all-fields must be false and the union is {field1, field2}.
    assertFalse(fieldAccess.getAllFields());
    assertThat(fieldAccess.fieldNamesAccessed(), containsInAnyOrder("field1", "field2"));
}
/**
 * Decodes every buffer in the result into a MessageExt. Null or undecodable
 * buffers are logged and skipped. The queue offset is taken from the
 * consume-queue result, not from the message body. The underlying mapped
 * buffers are always released, even if decoding fails.
 */
protected List<MessageExt> decodeMsgList(GetMessageResult getMessageResult, boolean deCompressBody) {
    List<MessageExt> foundList = new ArrayList<>();
    try {
        List<ByteBuffer> messageBufferList = getMessageResult.getMessageBufferList();
        if (messageBufferList != null) {
            for (int i = 0; i < messageBufferList.size(); i++) {
                ByteBuffer bb = messageBufferList.get(i);
                if (bb == null) {
                    LOG.error("bb is null {}", getMessageResult);
                    continue;
                }
                MessageExt msgExt = MessageDecoder.decode(bb, true, deCompressBody);
                if (msgExt == null) {
                    LOG.error("decode msgExt is null {}", getMessageResult);
                    continue;
                }
                // use CQ offset, not offset in Message
                msgExt.setQueueOffset(getMessageResult.getMessageQueueOffset().get(i));
                foundList.add(msgExt);
            }
        }
    } finally {
        // Release mapped buffers regardless of decode outcome.
        getMessageResult.release();
    }
    return foundList;
}
// A buffer too small to decode must be skipped without throwing; decodeMsgList
// releases the result either way.
@Test
public void decodeMsgListTest() {
    ByteBuffer byteBuffer = ByteBuffer.allocate(10);
    MappedFile mappedFile = new DefaultMappedFile();
    SelectMappedBufferResult result = new SelectMappedBufferResult(0, byteBuffer, 10, mappedFile);
    getMessageResult.addMessage(result);
    Assertions.assertThatCode(() -> escapeBridge.decodeMsgList(getMessageResult, false)).doesNotThrowAnyException();
}
/**
 * Records a visit for the (priority, capability, resourceName) triple of the
 * given request.
 *
 * @param rr the resource request being visited
 * @return true if this triple had not been visited before, false otherwise
 */
boolean visit(ResourceRequest rr) {
    Priority priority = rr.getPriority();
    Resource capability = rr.getCapability();
    // computeIfAbsent replaces the manual get / null-check / put dance at both
    // nesting levels.
    Map<Resource, TrackerPerPriorityResource> subMap =
        map.computeIfAbsent(priority, k -> new HashMap<>());
    TrackerPerPriorityResource tracker =
        subMap.computeIfAbsent(capability, k -> new TrackerPerPriorityResource());
    return tracker.visit(rr.getResourceName());
}
// After a node-local request is visited, rack and ANY visits with the same
// priority/capability must be rejected, while a different node is still allowed.
@Test
public void testVisitNodeRequestFirst() {
    VisitedResourceRequestTracker tracker = new VisitedResourceRequestTracker(nodeTracker);
    // Visit node1 first
    assertTrue(FIRST_CALL_FAILURE, tracker.visit(node1Request));
    // Rack and ANY should return false
    assertFalse(NODE_VISITED + ANY_FAILURE, tracker.visit(anyRequest));
    assertFalse(NODE_VISITED + RACK_FAILURE, tracker.visit(rackRequest));
    // The other node should return true
    assertTrue(NODE_VISITED + "Different node visit failed", tracker.visit(node2Request));
}
/**
 * Merges {@code paramsToMerge} into {@code params} in place. Literal MAP params
 * are merged recursively (the parent's mode constrains children); literal
 * STRING_MAP params are merged shallowly with to-merge entries winning on key
 * collision; every other param is replaced via buildMergedParamDefinition,
 * which applies the mode rules carried by {@code context}.
 */
public static void mergeParams(
    Map<String, ParamDefinition> params,
    Map<String, ParamDefinition> paramsToMerge,
    MergeContext context) {
  if (paramsToMerge == null) {
    return;
  }
  // Iterate the union of both key sets so names present on only one side are seen.
  Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
      .forEach(
          name -> {
            ParamDefinition paramToMerge = paramsToMerge.get(name);
            if (paramToMerge == null) {
              // Nothing to merge for this name; keep the existing definition.
              return;
            }
            if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
              // Literal maps merge key-by-key, recursing into nested maps.
              Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
              Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
              mergeParams(
                  baseMap,
                  toMergeMap,
                  MergeContext.copyWithParentMode(
                      context, params.getOrDefault(name, paramToMerge).getMode()));
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else if (paramToMerge.getType() == ParamType.STRING_MAP
                && paramToMerge.isLiteral()) {
              // String maps merge shallowly; merged-in entries overwrite base entries.
              Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
              Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
              baseMap.putAll(toMergeMap);
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else {
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
            }
          });
}
// SYSTEM_INJECTED (upstream) values must be allowed to overwrite an existing
// param in each of these modes without throwing.
@Test
public void testMergeAllowUpstreamChanges() throws JsonProcessingException {
    for (ParamMode mode :
        Arrays.asList(ParamMode.MUTABLE_ON_START, ParamMode.CONSTANT, ParamMode.IMMUTABLE)) {
      Map<String, ParamDefinition> allParams =
          parseParamDefMap(
              String.format(
                  "{'tomerge': {'type': 'STRING','value': 'hello', 'mode': '%s'}}",
                  mode.toString()));
      Map<String, ParamDefinition> paramsToMerge =
          parseParamDefMap(
              "{'tomerge': {'type': 'STRING', 'value': 'goodbye', 'source': 'SYSTEM_INJECTED'}}");
      // Must not throw for any of the modes above.
      ParamsMergeHelper.mergeParams(allParams, paramsToMerge, upstreamMergeContext);
    }
}
/**
 * Preloads anticipated transitions for FILE components; every other component
 * type gets an empty list so later lookups are cheap and null-safe.
 */
@Override
public void beforeComponent(Component component) {
    anticipatedTransitions = FILE.equals(component.getType())
            ? anticipatedTransitionRepository.getAnticipatedTransitionByComponent(component)
            : Collections.emptyList();
}
// A non-FILE (PROJECT) component must not trigger a repository lookup at all.
@Test
public void givenAProjecComponent_theRepositoryIsNotQueriedForAnticipatedTransitions() {
    Component component = getComponent(PROJECT);
    // Defensive stubbing only; the call below must never actually happen.
    when(anticipatedTransitionRepository.getAnticipatedTransitionByComponent(component)).thenReturn(Collections.emptyList());
    underTest.beforeComponent(component);
    verifyNoInteractions(anticipatedTransitionRepository);
}
/**
 * Enriches the given job stats with derived/extended information. Uses
 * tryLock so concurrent callers skip the update instead of blocking; the
 * previously computed extended stats are returned either way.
 *
 * Fix: unlock() now runs in a finally block — previously an exception in any
 * setter left the lock permanently held.
 */
public JobStatsExtended enrich(JobStats jobStats) {
    JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
    if (lock.tryLock()) {
        try {
            setFirstRelevantJobStats(latestJobStats);
            setJobStatsExtended(latestJobStats);
            setPreviousJobStats(latestJobStats);
        } finally {
            lock.unlock();
        }
    }
    return jobStatsExtended;
}
// The very first enrich() call must capture its input as "firstRelevantJobStats".
@Test
void firstRelevantJobStatsIsSetInitially() {
    JobStats firstJobStats = getJobStats(0L, 0L, 0L, 100L);
    jobStatsEnricher.enrich(firstJobStats);
    // Whitebox reads the private field populated by enrich().
    JobStats jobStats = Whitebox.getInternalState(jobStatsEnricher, "firstRelevantJobStats");
    assertThat(jobStats).isEqualToComparingFieldByField(firstJobStats);
}
/**
 * Orders the two endpoints of this relative path: an empty path (the common
 * ancestor itself) sorts before any descendant; otherwise the first child
 * indices decide. Returns a negative, zero, or positive value accordingly.
 */
public int compareNodePositions() {
    final boolean beginAtAncestor = beginPath.length == 0;
    final boolean endAtAncestor = endPath.length == 0;
    if (beginAtAncestor) {
        return endAtAncestor ? 0 : -1;
    }
    if (endAtAncestor) {
        return 1;
    }
    return Integer.compare(beginPath[0], endPath[0]);
}
// The relative path from a node to itself must compare as equal (0).
@Test
public void compareSameNode(){
    final NodeModel parent = root();
    final int compared = new NodeRelativePath(parent, parent).compareNodePositions();
    assertEquals(0, compared);
}
/**
 * Converts a param definition into an evaluated Parameter for the workflow.
 * On MaestroRuntimeException: returns null (with a warning) when
 * {@code ignoreError} is set; otherwise rethrows as
 * MaestroUnprocessableEntityException carrying the param name and workflow id.
 */
public Parameter parseAttribute(
    ParamDefinition paramDef,
    Map<String, Parameter> evaluatedParams,
    String workflowId,
    boolean ignoreError) {
  try {
    Parameter param = paramDef.toParameter();
    parseWorkflowParameter(evaluatedParams, param, workflowId);
    return param;
  } catch (MaestroRuntimeException error) {
    if (ignoreError) {
      // Best-effort mode: log and swallow instead of failing the caller.
      LOG.warn(
          "Ignore the error while parsing attribute [{}] for workflow [{}] due to ",
          paramDef.getName(),
          workflowId,
          error);
      return null;
    }
    throw new MaestroUnprocessableEntityException(
        "Failed to parse attribute [%s] for workflow [%s] due to %s",
        paramDef.getName(), workflowId, error);
  }
}
// With ignoreError=false, a bad attribute expression must surface as
// MaestroUnprocessableEntityException with the expected message prefix.
@Test
public void testParseInvalidAttributeValue() {
    AssertHelper.assertThrows(
        "unable to parse attribute",
        MaestroUnprocessableEntityException.class,
        "Failed to parse attribute [bar] for workflow [test-workflow] due to ",
        () ->
            paramEvaluator.parseAttribute(
                ParamDefinition.buildParamDefinition("bar", "${foo}"),
                Collections.singletonMap(
                    "bat", LongParameter.builder().expression("1+2+3;").build()),
                "test-workflow",
                false));
}
// Default lookup: only regular files under the path count as config data.
@Override
public ConfigData get(String path) {
    return get(path, Files::isRegularFile);
}
// Keys that try to escape the base directory via ".." must resolve to nothing.
@Test
public void testNoTraversal() {
    // Check we can't escape outside the path directory
    Set<String> keys = toSet(asList(
        String.join(File.separator, "..", siblingFileName),
        String.join(File.separator, "..", siblingDir),
        String.join(File.separator, "..", siblingDir, siblingDirFileName)));
    ConfigData configData = provider.get(dir, keys);
    assertTrue(configData.data().isEmpty());
    assertNull(configData.ttl());
}
/**
 * Selects every installed plugin that is a vulnerability detector and matches
 * it against the services found during reconnaissance.
 */
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors(
    ReconnaissanceReport reconnaissanceReport) {
  return tsunamiPlugins.entrySet().stream()
      .filter(entry -> isVulnDetector(entry.getKey()))
      // map + flatMap fused into one flatMap stage.
      .flatMap(
          entry ->
              Streams.stream(
                  matchAllVulnDetectors(entry.getKey(), entry.getValue(), reconnaissanceReport)))
      .collect(toImmutableList());
}
// With two remote vuln detectors installed and no service filtering configured,
// getVulnDetectors must return a matching result for each remote plugin.
@Test
public void getVulnDetectors_whenRemotePluginsInstalledNoFiltering_returnsAllRemoteTsunamiPlugins()
    throws Exception {
  // Two fake services on the same host; neither restricts plugin matching here.
  NetworkService fakeNetworkService1 =
      NetworkService.newBuilder()
          .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80))
          .setTransportProtocol(TransportProtocol.TCP)
          .setServiceName("http")
          .build();
  NetworkService fakeNetworkService2 =
      NetworkService.newBuilder()
          .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443))
          .setTransportProtocol(TransportProtocol.TCP)
          .setServiceName("https")
          .build();
  ReconnaissanceReport fakeReconnaissanceReport =
      ReconnaissanceReport.newBuilder()
          .setTargetInfo(TargetInfo.getDefaultInstance())
          .addNetworkServices(fakeNetworkService1)
          .addNetworkServices(fakeNetworkService2)
          .build();
  // Install two fake remote detectors via the loading module.
  PluginManager pluginManager =
      Guice.createInjector(
              new FakeServiceFingerprinterBootstrapModule(),
              new FakeRemoteVulnDetectorLoadingModule(2))
          .getInstance(PluginManager.class);
  ImmutableList<PluginMatchingResult<VulnDetector>> remotePlugins =
      pluginManager.getVulnDetectors(fakeReconnaissanceReport);
  assertThat(
          remotePlugins.stream()
              .map(pluginMatchingResult -> pluginMatchingResult.tsunamiPlugin().getClass()))
      .containsExactly(FakeRemoteVulnDetector.class, FakeRemoteVulnDetector.class);
}
/**
 * Converts a conjunct predicate expression into the equivalent Elasticsearch
 * query DSL builder by dispatching to the expression visitor.
 */
public QueryBuilders.QueryBuilder convert(Expr conjunct) {
    return visit(conjunct);
}
// Each binary comparison on an INT column must translate to the expected ES DSL:
// LT/LE/GT/GE -> range clauses, EQ -> term, NE -> bool.must_not(term).
@Test
public void testTranslateRangePredicate() {
    SlotRef valueSlotRef = mockSlotRef("value", Type.INT);
    IntLiteral intLiteral = new IntLiteral(1000);
    Expr leExpr = new BinaryPredicate(BinaryType.LE, valueSlotRef, intLiteral);
    Expr ltExpr = new BinaryPredicate(BinaryType.LT, valueSlotRef, intLiteral);
    Expr geExpr = new BinaryPredicate(BinaryType.GE, valueSlotRef, intLiteral);
    Expr gtExpr = new BinaryPredicate(BinaryType.GT, valueSlotRef, intLiteral);
    Expr eqExpr = new BinaryPredicate(BinaryType.EQ, valueSlotRef, intLiteral);
    Expr neExpr = new BinaryPredicate(BinaryType.NE, valueSlotRef, intLiteral);
    Assert.assertEquals("{\"range\":{\"value\":{\"lt\":1000}}}", queryConverter.convert(ltExpr).toString());
    Assert.assertEquals("{\"range\":{\"value\":{\"lte\":1000}}}", queryConverter.convert(leExpr).toString());
    Assert.assertEquals("{\"range\":{\"value\":{\"gt\":1000}}}", queryConverter.convert(gtExpr).toString());
    Assert.assertEquals("{\"range\":{\"value\":{\"gte\":1000}}}", queryConverter.convert(geExpr).toString());
    Assert.assertEquals("{\"term\":{\"value\":1000}}", queryConverter.convert(eqExpr).toString());
    Assert.assertEquals("{\"bool\":{\"must_not\":{\"term\":{\"value\":1000}}}}", queryConverter.convert(neExpr).toString());
}
/**
 * Builds a segment name from prefix, optional time range, postfix, optional
 * sequence id and optional UUID. Null parts are skipped by {@code JOINER}.
 *
 * @throws IllegalArgumentException if a time value is not a valid segment-name part
 */
@Override
public String generateSegmentName(int sequenceId, @Nullable Object minTimeValue, @Nullable Object maxTimeValue) {
  // Optional trailing parts; a negative sequence id means "no sequence part".
  Object sequencePart = sequenceId >= 0 ? sequenceId : null;
  String uuidPart = _appendUUIDToSegmentName ? UUID.randomUUID().toString() : null;
  if (_excludeTimeInSegmentName) {
    return JOINER.join(_segmentNamePrefix, _segmentNamePostfix, sequencePart, uuidPart);
  }
  // Time values become name components, so they must be valid name fragments.
  if (minTimeValue != null) {
    SegmentNameUtils.validatePartialOrFullSegmentName(minTimeValue.toString());
  }
  if (maxTimeValue != null) {
    SegmentNameUtils.validatePartialOrFullSegmentName(maxTimeValue.toString());
  }
  return JOINER.join(_segmentNamePrefix, minTimeValue, maxTimeValue, _segmentNamePostfix, sequencePart, uuidPart);
}
// Malformed inputs must be rejected with IllegalArgumentException at three points:
// a bad table name, a bad postfix (both at construction), and a bad time value
// (at name-generation time, with a precise error message).
@Test
public void testWithMalFormedTableNameSegmentNamePostfixTimeValue() {
    try {
        new SimpleSegmentNameGenerator(MALFORMED_TABLE_NAME, SEGMENT_NAME_POSTFIX);
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // Expected
    }
    try {
        new SimpleSegmentNameGenerator(TABLE_NAME, MALFORMED_SEGMENT_NAME_POSTFIX);
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // Expected
    }
    try {
        SegmentNameGenerator segmentNameGenerator = new SimpleSegmentNameGenerator(TABLE_NAME, SEGMENT_NAME_POSTFIX);
        segmentNameGenerator.generateSegmentName(VALID_SEQUENCE_ID, MIN_TIME_VALUE, MALFORMED_TIME_VALUE);
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // Expected
        assertEquals(e.getMessage(), "Invalid partial or full segment name: 12|34");
    }
}
/**
 * Processes one input row: keeps only the first row per key on processing time
 * (later duplicates are dropped by the delegated dedup routine).
 */
@Override
public void processElement(RowData input, Context ctx, Collector<RowData> out) throws Exception {
    processFirstRowOnProcTime(input, state, out);
}
// Keep-first-row dedup: for key "book", only the first record (12) is emitted;
// the later duplicate (13) is dropped, and no retraction is sent.
@Test
public void test() throws Exception {
    ProcTimeDeduplicateKeepFirstRowFunction func =
        new ProcTimeDeduplicateKeepFirstRowFunction(minTime.toMilliseconds());
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(func);
    testHarness.open();
    testHarness.processElement(insertRecord("book", 1L, 12));
    testHarness.processElement(insertRecord("book", 2L, 11));
    testHarness.processElement(insertRecord("book", 1L, 13));
    testHarness.close();
    // Keep FirstRow in deduplicate will not send retraction
    List<Object> expectedOutput = new ArrayList<>();
    expectedOutput.add(insertRecord("book", 1L, 12));
    expectedOutput.add(insertRecord("book", 2L, 11));
    assertor.assertOutputEqualsSorted("output wrong.", expectedOutput, testHarness.getOutput());
}
/**
 * Reverses, in place, the elements of {@code array} between
 * {@code startIndexInclusive} (clamped to 0) and {@code endIndexExclusive}
 * (clamped to the array length). Returns the same array instance.
 *
 * @return {@code array}, possibly unchanged when it is null/empty or the range is empty
 */
public static <T> T[] reverse(T[] array, final int startIndexInclusive, final int endIndexExclusive) {
    if (isEmpty(array)) {
        return array;
    }
    // Clamp the requested range to the actual bounds, then swap from both ends inward.
    for (int lo = Math.max(startIndexInclusive, 0), hi = Math.min(array.length, endIndexExclusive) - 1;
            lo < hi; lo++, hi--) {
        final T swapped = array[lo];
        array[lo] = array[hi];
        array[hi] = swapped;
    }
    return array;
}
// NOTE(review): this exercises the int[] overload ArrayUtil.reverse(int[]),
// not the generic range variant reverse(T[], int, int) — confirm intended coverage.
@Test
public void reverseTest() {
    int[] a = {1, 2, 3, 4};
    final int[] reverse = ArrayUtil.reverse(a);
    assertArrayEquals(new int[]{4, 3, 2, 1}, reverse);
}
/**
 * Sends one message (or a batch, when more than one is supplied) to the
 * in-process broker by invoking its send-message processor directly over a
 * synthetic invocation channel, then maps the remoting response to a
 * single-element {@code SendResult} list.
 *
 * NOTE(review): messageQueue and timeoutMillis are not used by this local
 * implementation — confirm whether that is intentional.
 *
 * @return a future completing with one SendResult, or completing exceptionally
 *         on processor failure / non-success response codes
 */
@Override
public CompletableFuture<List<SendResult>> sendMessage(ProxyContext ctx, AddressableMessageQueue messageQueue,
    List<Message> msgList, SendMessageRequestHeader requestHeader, long timeoutMillis) {
    byte[] body;
    String messageId;
    if (msgList.size() > 1) {
        // Multiple messages: encode them as one MessageBatch carrying a single unique id.
        requestHeader.setBatch(true);
        MessageBatch msgBatch = MessageBatch.generateFromList(msgList);
        MessageClientIDSetter.setUniqID(msgBatch);
        body = msgBatch.encode();
        msgBatch.setBody(body);
        messageId = MessageClientIDSetter.getUniqID(msgBatch);
    } else {
        Message message = msgList.get(0);
        body = message.getBody();
        messageId = MessageClientIDSetter.getUniqID(message);
    }
    RemotingCommand request = LocalRemotingCommand.createRequestCommand(RequestCode.SEND_MESSAGE, requestHeader, ctx.getLanguage());
    request.setBody(body);
    // Register a pending invocation so a response can complete the future.
    CompletableFuture<RemotingCommand> future = new CompletableFuture<>();
    SimpleChannel channel = channelManager.createInvocationChannel(ctx);
    InvocationContext invocationContext = new InvocationContext(future);
    channel.registerInvocationContext(request.getOpaque(), invocationContext);
    ChannelHandlerContext simpleChannelHandlerContext = channel.getChannelHandlerContext();
    try {
        RemotingCommand response = brokerController.getSendMessageProcessor().processRequest(simpleChannelHandlerContext, request);
        if (response != null) {
            // Synchronous response: complete the future now and drop the pending context.
            invocationContext.handle(response);
            channel.eraseInvocationContext(request.getOpaque());
        }
    } catch (Exception e) {
        future.completeExceptionally(e);
        channel.eraseInvocationContext(request.getOpaque());
        log.error("Failed to process sendMessage command", e);
    }
    // Translate the remoting response into a SendResult once it arrives.
    return future.thenApply(r -> {
        SendResult sendResult = new SendResult();
        SendMessageResponseHeader responseHeader = (SendMessageResponseHeader) r.readCustomHeader();
        SendStatus sendStatus;
        switch (r.getCode()) {
            case ResponseCode.FLUSH_DISK_TIMEOUT: {
                sendStatus = SendStatus.FLUSH_DISK_TIMEOUT;
                break;
            }
            case ResponseCode.FLUSH_SLAVE_TIMEOUT: {
                sendStatus = SendStatus.FLUSH_SLAVE_TIMEOUT;
                break;
            }
            case ResponseCode.SLAVE_NOT_AVAILABLE: {
                sendStatus = SendStatus.SLAVE_NOT_AVAILABLE;
                break;
            }
            case ResponseCode.SUCCESS: {
                sendStatus = SendStatus.SEND_OK;
                break;
            }
            default: {
                // Any other code is treated as an internal error, surfacing the remark.
                throw new ProxyException(ProxyExceptionCode.INTERNAL_SERVER_ERROR, r.getRemark());
            }
        }
        sendResult.setSendStatus(sendStatus);
        sendResult.setMsgId(messageId);
        sendResult.setMessageQueue(new MessageQueue(requestHeader.getTopic(), brokerController.getBrokerConfig().getBrokerName(), requestHeader.getQueueId()));
        sendResult.setQueueOffset(responseHeader.getQueueOffset());
        sendResult.setTransactionId(responseHeader.getTransactionId());
        sendResult.setOffsetMsgId(responseHeader.getMsgId());
        return Collections.singletonList(sendResult);
    });
}
// When the broker's send processor throws, sendMessage must complete its future
// exceptionally with that cause rather than throwing synchronously.
@Test
public void testSendMessageWithException() throws Exception {
    Mockito.when(sendMessageProcessorMock.processRequest(Mockito.any(SimpleChannelHandlerContext.class), Mockito.any(RemotingCommand.class)))
        .thenThrow(new RemotingCommandException("test"));
    Message message = new Message("topic", "body".getBytes(StandardCharsets.UTF_8));
    MessageClientIDSetter.setUniqID(message);
    List<Message> messagesList = Collections.singletonList(message);
    SendMessageRequestHeader sendMessageRequestHeader = new SendMessageRequestHeader();
    CompletableFuture<List<SendResult>> future =
        localMessageService.sendMessage(proxyContext, null, messagesList, sendMessageRequestHeader, 1000L);
    ExecutionException exception = catchThrowableOfType(future::get, ExecutionException.class);
    assertThat(exception.getCause()).isInstanceOf(RemotingCommandException.class);
}
/**
 * Fetches configuration content for the given dataId/group within this client's
 * namespace, waiting at most {@code timeoutMs}.
 *
 * @throws NacosException if the lookup fails
 */
@Override
public String getConfig(String dataId, String group, long timeoutMs) throws NacosException {
    return getConfigInner(namespace, dataId, group, timeoutMs);
}
// With no failover content available and the server rejecting the request with
// NO_RIGHT (403), getConfig must propagate a NacosException carrying that code.
@Test
void testGetConfig403() throws NacosException {
    final String dataId = "1localcache403";
    final String group = "2";
    final String tenant = "";
    MockedStatic<LocalConfigInfoProcessor> localConfigInfoProcessorMockedStatic =
        Mockito.mockStatic(LocalConfigInfoProcessor.class);
    try {
        //fail over null
        localConfigInfoProcessorMockedStatic.when(() -> LocalConfigInfoProcessor.getFailover(any(), eq(dataId), eq(group), eq(tenant)))
            .thenReturn(null);
        //form server error.
        final int timeout = 3000;
        Mockito.when(mockWoker.getServerConfig(dataId, group, "", timeout, false))
            .thenThrow(new NacosException(NacosException.NO_RIGHT, "no right"));
        try {
            nacosConfigService.getConfig(dataId, group, timeout);
            // NOTE(review): prefer fail("expected NacosException") over assertTrue(false).
            assertTrue(false);
        } catch (NacosException e) {
            assertEquals(NacosException.NO_RIGHT, e.getErrCode());
        }
    } finally {
        localConfigInfoProcessorMockedStatic.close();
    }
}
public static String getTempDir() { // default is user home directory String tempDir = System.getProperty("user.home"); try{ //create a temp file File temp = File.createTempFile("A0393939", ".tmp"); //Get tempropary file path String absolutePath = temp.getAbsolutePath(); tempDir = absolutePath.substring(0,absolutePath.lastIndexOf(File.separator)); }catch(IOException e){} return tempDir; }
// The original test only printed the value and asserted nothing; a regression
// (null / nonexistent directory) would pass silently. Assert real properties.
// Fully-qualified names and plain AssertionError are used so no new imports
// are required in this file.
@Test
public void testTempDir() {
    String tempDir = NioUtils.getTempDir();
    if (tempDir == null || tempDir.isEmpty()) {
        throw new AssertionError("NioUtils.getTempDir() returned no directory: " + tempDir);
    }
    if (!new java.io.File(tempDir).isDirectory()) {
        throw new AssertionError("NioUtils.getTempDir() is not an existing directory: " + tempDir);
    }
}
/**
 * Applies the proxy's configured backend fetch size to the statement. When the
 * property is still at its default value, Integer.MIN_VALUE is used instead,
 * which switches the MySQL driver into row-streaming mode.
 */
@Override
public void setFetchSize(final Statement statement) throws SQLException {
    ConfigurationPropertyKey fetchSizeKey = ConfigurationPropertyKey.PROXY_BACKEND_QUERY_FETCH_SIZE;
    int configuredFetchSize = ProxyContext.getInstance()
            .getContextManager().getMetaDataContexts().getMetaData().getProps().<Integer>getValue(fetchSizeKey);
    boolean isDefaultValue = fetchSizeKey.getDefaultValue().equals(String.valueOf(configuredFetchSize));
    statement.setFetchSize(isDefaultValue ? Integer.MIN_VALUE : configuredFetchSize);
}
// With the fetch-size property at its default, the setter must switch the
// statement into MySQL streaming mode (Integer.MIN_VALUE).
@Test
void assertSetFetchSize() throws SQLException {
    Statement statement = mock(Statement.class);
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    new MySQLStatementMemoryStrictlyFetchSizeSetter().setFetchSize(statement);
    verify(statement).setFetchSize(Integer.MIN_VALUE);
}
/**
 * Loads a function by identifier, first from this catalog and, if absent,
 * falling back to the underlying session catalog.
 *
 * @throws NoSuchFunctionException if neither catalog knows the function
 */
@Override
public UnboundFunction loadFunction(Identifier ident) throws NoSuchFunctionException {
    try {
        return super.loadFunction(ident);
    } catch (NoSuchFunctionException e) {
        // Not found locally: delegate to the session catalog (which may itself throw).
        return getSessionCatalog().loadFunction(ident);
    }
}
// Both permanent and temporary Hive UDFs must be resolvable through the
// FunctionCatalog fallback (loadFunction delegating to the session catalog).
@Test
public void testLoadFunction() {
    String functionClass = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper";
    // load permanent UDF in Hive via FunctionCatalog
    spark.sql(String.format("CREATE FUNCTION perm_upper AS '%s'", functionClass));
    Assert.assertEquals("Load permanent UDF in Hive", "XYZ", scalarSql("SELECT perm_upper('xyz')"));
    // load temporary UDF in Hive via FunctionCatalog
    spark.sql(String.format("CREATE TEMPORARY FUNCTION temp_upper AS '%s'", functionClass));
    Assert.assertEquals("Load temporary UDF in Hive", "XYZ", scalarSql("SELECT temp_upper('xyz')"));
    // TODO: fix loading Iceberg built-in functions in SessionCatalog
}
/** Returns the data table type registered by this definition. */
@Override
public DataTableType dataTableType() {
    return dataTableType;
}
// The "[empty]" replacement pattern must map empty table cells to empty strings
// when the row transformer converts each row to a list of strings.
@Test
void can_define_table_row_transformer_with_empty_pattern() throws NoSuchMethodException {
    Method method = JavaDataTableTypeDefinitionTest.class.getMethod("convert_table_row_to_string", List.class);
    JavaDataTableTypeDefinition definition =
        new JavaDataTableTypeDefinition(method, lookup, new String[] { "[empty]" });
    assertThat(definition.dataTableType().transform(emptyTable.cells()),
        is(asList("convert_table_row_to_string=[a, ]", "convert_table_row_to_string=[, d]")));
}
/**
 * Returns true if {@code exception} or any exception in its cause chain has a
 * message containing {@code message}.
 *
 * <p>The walk is iterative (the original recursed once per cause) and guards
 * against cyclic cause chains, which would previously have caused a
 * StackOverflowError.
 *
 * @param exception the head of the chain to inspect; may be null (returns false)
 * @param message the substring to look for
 */
public static boolean containsMessage(Throwable exception, String message) {
    Throwable current = exception;
    // Tortoise pointer advancing at half speed: if the cause chain cycles,
    // `current` eventually meets `checkpoint` and we stop.
    Throwable checkpoint = exception;
    int steps = 0;
    while (current != null) {
        String currentMessage = current.getMessage();
        if (currentMessage != null && currentMessage.contains(message)) {
            return true;
        }
        current = current.getCause();
        if (current == checkpoint) {
            return false; // cyclic cause chain: no match found in one full lap
        }
        if ((++steps & 1) == 0) {
            checkpoint = checkpoint.getCause();
        }
    }
    return false;
}
// A substring appearing nowhere in the exception chain must not match.
@Test
public void testContainsNegative() {
    assertThat(
        containsMessage(
            new IllegalStateException("RESOURCE_EXHAUSTED: Quota issues"),
            "NullPointerException"))
        .isFalse();
}
/**
 * NFSv3 SETATTR entry point: resolves the security handler and remote address
 * from the RPC info, then delegates to the main implementation.
 */
@Override
public SETATTR3Response setattr(XDR xdr, RpcInfo info) {
    return setattr(xdr, getSecurityHandler(info), info.remoteAddress());
}
// SETATTR on the test directory: an unprivileged caller must get NFS3ERR_ACCES,
// a privileged caller must get NFS3_OK.
@Test(timeout = 60000)
public void testSetattr() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    int namenodeId = Nfs3Utils.getNamenodeId(config);
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId, namenodeId);
    // Request changes only the UID attribute field.
    SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null, EnumSet.of(SetAttrField.UID));
    SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
    req.serialize(xdr_req);
    // Attempt by an unprivileged user should fail.
    SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
        securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a priviledged user should pass.
    SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus());
}
/**
 * Builds the externally-visible state of a connector: its own state plus the
 * sorted states of all its tasks, and the connector type derived from its raw
 * configuration.
 *
 * @throws NotFoundException if no status is stored for {@code connName}
 */
@Override
public ConnectorStateInfo connectorStatus(String connName) {
    ConnectorStatus connector = statusBackingStore.get(connName);
    if (connector == null)
        throw new NotFoundException("No status found for connector " + connName);
    ConnectorStateInfo.ConnectorState connectorState = new ConnectorStateInfo.ConnectorState(
            connector.state().toString(), connector.workerId(), connector.trace());
    List<ConnectorStateInfo.TaskState> taskStates = new ArrayList<>();
    statusBackingStore.getAll(connName).forEach(status ->
            taskStates.add(new ConnectorStateInfo.TaskState(status.id().task(),
                    status.state().toString(), status.workerId(), status.trace())));
    // Present tasks in their natural (task-id) order.
    Collections.sort(taskStates);
    return new ConnectorStateInfo(connName, connectorState, taskStates,
            connectorType(rawConfig(connName)));
}
// connectorStatus must combine the stored connector state (RUNNING) with its
// task states (one UNASSIGNED task) and derive the SOURCE type from the config.
@Test
public void testConnectorStatus() {
    ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0);
    AbstractHerder herder = testHerder();
    when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector());
    when(herder.plugins()).thenReturn(plugins);
    when(herder.rawConfig(connectorName)).thenReturn(Collections.singletonMap(
        ConnectorConfig.CONNECTOR_CLASS_CONFIG, SampleSourceConnector.class.getName()
    ));
    when(statusStore.get(connectorName))
        .thenReturn(new ConnectorStatus(connectorName, AbstractStatus.State.RUNNING, workerId, generation));
    when(statusStore.getAll(connectorName))
        .thenReturn(Collections.singletonList(
            new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation)));
    ConnectorStateInfo state = herder.connectorStatus(connectorName);
    assertEquals(connectorName, state.name());
    assertEquals(ConnectorType.SOURCE, state.type());
    assertEquals("RUNNING", state.connector().state());
    assertEquals(1, state.tasks().size());
    assertEquals(workerId, state.connector().workerId());
    ConnectorStateInfo.TaskState taskState = state.tasks().get(0);
    assertEquals(0, taskState.id());
    assertEquals("UNASSIGNED", taskState.state());
    assertEquals(workerId, taskState.workerId());
}
/**
 * Builds at most one ShareFetch request per eligible broker node.
 *
 * <p>Order of checks: no member id yet -> nothing to do; pending
 * acknowledgements are flushed first; fetching disabled or closing -> nothing.
 * Partitions whose leader or topic id is unknown trigger a metadata update and
 * are skipped; nodes with an in-flight request are skipped.
 */
@Override
public PollResult poll(long currentTimeMs) {
    if (memberId == null) {
        return PollResult.EMPTY;
    }
    // Send any pending acknowledgements before fetching more records.
    PollResult pollResult = processAcknowledgements(currentTimeMs);
    if (pollResult != null) {
        return pollResult;
    }
    if (!fetchMoreRecords || closing) {
        return PollResult.EMPTY;
    }
    Map<Node, ShareSessionHandler> handlerMap = new HashMap<>();
    Map<String, Uuid> topicIds = metadata.topicIds();
    for (TopicPartition partition : partitionsToFetch()) {
        Optional<Node> leaderOpt = metadata.currentLeader(partition).leader;
        if (!leaderOpt.isPresent()) {
            log.debug("Requesting metadata update for partition {} since current leader node is missing", partition);
            metadata.requestUpdate(false);
            continue;
        }
        Uuid topicId = topicIds.get(partition.topic());
        if (topicId == null) {
            log.debug("Requesting metadata update for partition {} since topic ID is missing", partition);
            metadata.requestUpdate(false);
            continue;
        }
        Node node = leaderOpt.get();
        if (nodesWithPendingRequests.contains(node.id())) {
            log.trace("Skipping fetch for partition {} because previous fetch request to {} has not been processed", partition, node.id());
        } else {
            // if there is a leader and no in-flight requests, issue a new fetch
            ShareSessionHandler handler = handlerMap.computeIfAbsent(node,
                k -> sessionHandlers.computeIfAbsent(node.id(), n -> new ShareSessionHandler(logContext, n, memberId)));
            TopicIdPartition tip = new TopicIdPartition(topicId, partition);
            // Piggy-back any pending acknowledgements for this partition on the fetch.
            Acknowledgements acknowledgementsToSend = fetchAcknowledgementsMap.get(tip);
            if (acknowledgementsToSend != null) {
                metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size());
            }
            handler.addPartitionToFetch(tip, acknowledgementsToSend);
            log.debug("Added fetch request for partition {} to node {}", partition, node.id());
        }
    }
    Map<Node, ShareFetchRequest.Builder> builderMap = new LinkedHashMap<>();
    for (Map.Entry<Node, ShareSessionHandler> entry : handlerMap.entrySet()) {
        builderMap.put(entry.getKey(), entry.getValue().newShareFetchBuilder(groupId, fetchConfig));
    }
    // Wrap each builder into an unsent request; mark its node as having an
    // in-flight request until the response handler runs.
    List<UnsentRequest> requests = builderMap.entrySet().stream().map(entry -> {
        Node target = entry.getKey();
        log.trace("Building ShareFetch request to send to node {}", target.id());
        ShareFetchRequest.Builder requestBuilder = entry.getValue();
        nodesWithPendingRequests.add(target.id());
        BiConsumer<ClientResponse, Throwable> responseHandler = (clientResponse, error) -> {
            if (error != null) {
                handleShareFetchFailure(target, requestBuilder.data(), error);
            } else {
                handleShareFetchSuccess(target, requestBuilder.data(), clientResponse);
            }
        };
        return new UnsentRequest(requestBuilder, Optional.of(target)).whenComplete(responseHandler);
    }).collect(Collectors.toList());
    return new PollResult(requests);
}
// A top-level UNKNOWN_TOPIC_ID error must yield no records and trigger an
// immediate metadata update (timeToNextUpdate == 0).
@Test
public void testUnknownTopicIdError() {
    buildRequestManager();
    assignFromSubscribed(singleton(tp0));
    assertEquals(1, sendFetches());
    client.prepareResponse(fetchResponseWithTopLevelError(tip0, Errors.UNKNOWN_TOPIC_ID));
    networkClientDelegate.poll(time.timer(0));
    assertEmptyFetch("Should not return records on fetch error");
    assertEquals(0L, metadata.timeToNextUpdate(time.milliseconds()));
}
/**
 * Intercepts the {@code @Extension} annotation while scanning a class file and
 * extracts its {@code ordinal}, {@code plugins} and {@code points} attributes
 * into {@code extensionInfo}. All other annotations pass through unchanged.
 */
@Override
public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) {
    // Compare via Type.getClassName() rather than the raw descriptor string
    // (previously: descriptor.equals("Lorg/pf4j/Extension;")).
    if (!Type.getType(descriptor).getClassName().equals(Extension.class.getName())) {
        return super.visitAnnotation(descriptor, visible);
    }
    return new AnnotationVisitor(ASM_VERSION) {
        @Override
        public AnnotationVisitor visitArray(final String name) {
            // Only the three known array-valued attributes are captured.
            if ("ordinal".equals(name) || "plugins".equals(name) || "points".equals(name)) {
                return new AnnotationVisitor(ASM_VERSION, super.visitArray(name)) {
                    @Override
                    public void visit(String key, Object value) {
                        log.debug("Load annotation attribute {} = {} ({})", name, value, value.getClass().getName());
                        if ("ordinal".equals(name)) {
                            extensionInfo.ordinal = Integer.parseInt(value.toString());
                        } else if ("plugins".equals(name)) {
                            // ASM may deliver plugin ids as a single String or a String[].
                            if (value instanceof String) {
                                log.debug("Found plugin {}", value);
                                extensionInfo.plugins.add((String) value);
                            } else if (value instanceof String[]) {
                                log.debug("Found plugins {}", Arrays.toString((String[]) value));
                                extensionInfo.plugins.addAll(Arrays.asList((String[]) value));
                            } else {
                                log.debug("Found plugin {}", value.toString());
                                extensionInfo.plugins.add(value.toString());
                            }
                        } else {
                            // "points": each value is an ASM Type for an extension-point class.
                            String pointClassName = ((Type) value).getClassName();
                            log.debug("Found point " + pointClassName);
                            extensionInfo.points.add(pointClassName);
                        }
                        super.visit(key, value);
                    }
                };
            }
            return super.visitArray(name);
        }
    };
}
// Visiting the "points" array of an @Extension annotation must record the
// point's class name (converted from an ASM Type) in the ExtensionInfo.
@Test
void visitArrayShouldHandlePointsAttribute() {
    ExtensionInfo extensionInfo = new ExtensionInfo("org.pf4j.asm.ExtensionInfo");
    ClassVisitor extensionVisitor = new ExtensionVisitor(extensionInfo);
    AnnotationVisitor annotationVisitor = extensionVisitor.visitAnnotation("Lorg/pf4j/Extension;", true);
    AnnotationVisitor arrayVisitor = annotationVisitor.visitArray("points");
    arrayVisitor.visit("key", Type.getType("Lorg/pf4j/Point;"));
    assertTrue(extensionInfo.getPoints().contains("org.pf4j.Point"));
}
/**
 * Returns a description of this topology's structure. Declared
 * {@code synchronized}; delegates to the internal topology builder.
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
// A named, in-memory materialized windowed count must keep the expected
// source -> aggregate topology and, being in-memory, have no persistent store.
// NOTE(review): the test name says "slidingWindow" but TimeWindows is used —
// confirm whether SlidingWindows was intended.
@Test
public void slidingWindowNamedMaterializedCountShouldPreserveTopologyStructure() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.of(ofMillis(1)))
        .count(Materialized.<Object, Long, WindowStore<Bytes, byte[]>>as("count-store").withStoreType(Materialized.StoreType.IN_MEMORY));
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topologies:\n"
            + "   Sub-topology: 0\n"
            + "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n"
            + "      --> KSTREAM-AGGREGATE-0000000001\n"
            + "    Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n"
            + "      --> none\n"
            + "      <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // In-memory store => the built topology reports no persistent local store.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
/**
 * Formats one request/response pair in Apache "combined" access-log style:
 * {@code %h %l %u %t "%r" %>s %b "%{Referer}i" "%{User-agent}i"}. Identity and
 * request-time fields are enriched from the API Gateway / HTTP API context
 * attributes when present.
 */
@Override
@SuppressFBWarnings({ "SERVLET_HEADER_REFERER", "SERVLET_HEADER_USER_AGENT" })
public String format(ContainerRequestType servletRequest, ContainerResponseType servletResponse, SecurityContext ctx) {
    //LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
    StringBuilder logLineBuilder = new StringBuilder();
    AwsProxyRequestContext gatewayContext = (AwsProxyRequestContext)servletRequest.getAttribute(API_GATEWAY_CONTEXT_PROPERTY);
    HttpApiV2ProxyRequestContext httpApiContext = (HttpApiV2ProxyRequestContext)servletRequest.getAttribute(HTTP_API_CONTEXT_PROPERTY);

    // %h — remote host
    logLineBuilder.append(servletRequest.getRemoteAddr());
    logLineBuilder.append(" ");

    // %l — remote logname: principal name, else the gateway identity's user ARN, else "-"
    if (servletRequest.getUserPrincipal() != null) {
        logLineBuilder.append(servletRequest.getUserPrincipal().getName());
    } else {
        logLineBuilder.append("-");
    }
    if (gatewayContext != null && gatewayContext.getIdentity() != null && gatewayContext.getIdentity().getUserArn() != null) {
        logLineBuilder.append(gatewayContext.getIdentity().getUserArn());
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append(" ");

    // %u — authenticated user (empty when no principal)
    if (servletRequest.getUserPrincipal() != null) {
        logLineBuilder.append(servletRequest.getUserPrincipal().getName());
    }
    logLineBuilder.append(" ");

    // %t — request time: prefer the gateway's epoch (ms), then HTTP API's, else "now"
    long timeEpoch = ZonedDateTime.now(clock).toEpochSecond();
    if (gatewayContext != null && gatewayContext.getRequestTimeEpoch() > 0) {
        timeEpoch = gatewayContext.getRequestTimeEpoch() / 1000;
    } else if (httpApiContext != null && httpApiContext.getTimeEpoch() > 0) {
        timeEpoch = httpApiContext.getTimeEpoch() / 1000;
    }
    logLineBuilder.append(
        dateFormat.format(ZonedDateTime.of(
            LocalDateTime.ofEpochSecond(timeEpoch, 0, ZoneOffset.UTC), clock.getZone())
        ));
    logLineBuilder.append(" ");

    // %r — request line: METHOD URI PROTOCOL
    logLineBuilder.append("\"");
    logLineBuilder.append(servletRequest.getMethod().toUpperCase(Locale.ENGLISH));
    logLineBuilder.append(" ");
    logLineBuilder.append(servletRequest.getRequestURI());
    logLineBuilder.append(" ");
    logLineBuilder.append(servletRequest.getProtocol());
    logLineBuilder.append("\" ");

    // %>s — response status
    logLineBuilder.append(servletResponse.getStatus());
    logLineBuilder.append(" ");

    // %b — response body size in bytes, "-" when unknown or zero
    if (servletResponse instanceof AwsHttpServletResponse) {
        AwsHttpServletResponse awsResponse = (AwsHttpServletResponse)servletResponse;
        if (awsResponse.getAwsResponseBodyBytes().length > 0) {
            logLineBuilder.append(awsResponse.getAwsResponseBodyBytes().length);
        } else {
            logLineBuilder.append("-");
        }
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append(" ");

    // \"%{Referer}i\"
    logLineBuilder.append("\"");
    if (servletRequest.getHeader("referer") != null) {
        logLineBuilder.append(servletRequest.getHeader("referer"));
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append("\" ");

    // \"%{User-agent}i\"
    logLineBuilder.append("\"");
    if (servletRequest.getHeader("user-agent") != null) {
        logLineBuilder.append(servletRequest.getHeader("user-agent"));
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append("\" ");

    logLineBuilder.append("combined");

    return logLineBuilder.toString();
}
// When the gateway context carries a positive request-time epoch, the formatter
// must render that instant (not "now") in the %t field.
@Test
void logsRequestTimeWhenRequestTimeEpochGreaterThanZero() {
    // given
    context.setRequestTimeEpoch(1563023494000L);
    // when
    String actual = sut.format(mockServletRequest, mockServletResponse, null);
    // then
    assertThat(actual, containsString("[13/07/2019:13:11:34Z]"));
}
/**
 * Publishes a configuration entry after verifying the backing config center is
 * reachable. Nacos clients get their group name normalised first.
 *
 * @return the connection-check failure as-is, SUCCESS with {@code true} when
 *         publishing worked, or MODIFY_FAIL with {@code false} otherwise
 */
public Result<Boolean> publishConfig(ConfigInfo request) {
    Result<Boolean> checkResult = checkConnection(request);
    if (!checkResult.isSuccess()) {
        return checkResult;
    }
    ConfigClient client = getConfigClient(request.getNamespace());
    // Nacos applies its own group-name normalisation before publishing.
    String group = client instanceof NacosClient
            ? NacosUtils.rebuildGroup(request.getGroup())
            : request.getGroup();
    boolean published = client.publishConfig(request.getKey(), group, request.getContent());
    return published
            ? new Result<>(ResultCodeType.SUCCESS.getCode(), null, true)
            : new Result<>(ResultCodeType.MODIFY_FAIL.getCode(), null, false);
}
// Publishing a well-formed config entry must succeed and report data == true.
@Test
public void addConfig() {
    ConfigInfo configInfo = new ConfigInfo();
    configInfo.setGroup(GROUP);
    configInfo.setKey(KEY);
    configInfo.setContent(CONTENT);
    configInfo.setPluginType(PluginType.SPRINGBOOT_REGISTRY.getPluginName());
    Result<Boolean> result = configService.publishConfig(configInfo);
    Assert.assertTrue(result.isSuccess());
    Assert.assertTrue(result.getData());
}
/**
 * Decodes the full byte array into a String using the first configured codec.
 */
public String decode(byte[] val) {
    return codecs[0].decode(val, 0, val.length);
}
// ISO 8859-5 (Cyrillic) bytes must round-trip back to the expected person name.
@Test
public void testDecodeRussianPersonName() {
    assertEquals(RUSSIAN_PERSON_NAME, iso8859_5().decode(RUSSIAN_PERSON_NAME_BYTE));
}
/**
 * Maps a Java {@code Type} to the corresponding parameter type using the
 * default Java-to-argument-type mapping.
 */
public static ParamType getSchemaFromType(final Type type) {
    return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
// A class with no entry in the Java-to-argument-type mapping must be rejected
// with a KsqlException.
@Test(expected = KsqlException.class)
public void shouldThrowExceptionIfClassDoesntMapToSchema() {
    UdfUtil.getSchemaFromType(System.class);
}
@Override @Transactional(rollbackFor = Exception.class) public void updateJobStatus(Long id, Integer status) throws SchedulerException { // 校验 status if (!containsAny(status, JobStatusEnum.NORMAL.getStatus(), JobStatusEnum.STOP.getStatus())) { throw exception(JOB_CHANGE_STATUS_INVALID); } // 校验存在 JobDO job = validateJobExists(id); // 校验是否已经为当前状态 if (job.getStatus().equals(status)) { throw exception(JOB_CHANGE_STATUS_EQUALS); } // 更新 Job 状态 JobDO updateObj = JobDO.builder().id(id).status(status).build(); jobMapper.updateById(updateObj); // 更新状态 Job 到 Quartz 中 if (JobStatusEnum.NORMAL.getStatus().equals(status)) { // 开启 schedulerManager.resumeJob(job.getHandlerName()); } else { // 暂停 schedulerManager.pauseJob(job.getHandlerName()); } }
// Stopping a NORMAL job must persist the STOP status and pause it in Quartz.
@Test
public void testUpdateJobStatus_stopSuccess() throws SchedulerException {
    // mock data: insert an existing job in NORMAL state
    JobDO job = randomPojo(JobDO.class, o -> o.setStatus(JobStatusEnum.NORMAL.getStatus()));
    jobMapper.insert(job);
    // invoke the service under test
    jobService.updateJobStatus(job.getId(), JobStatusEnum.STOP.getStatus());
    // verify the persisted status was updated
    JobDO dbJob = jobMapper.selectById(job.getId());
    assertEquals(JobStatusEnum.STOP.getStatus(), dbJob.getStatus());
    // verify the Quartz job was paused
    verify(schedulerManager).pauseJob(eq(job.getHandlerName()));
}
/**
 * Builds the FedBalance configuration: HDFS defaults plus the fed-balance
 * default and site resource files.
 */
@VisibleForTesting
static Configuration getConfiguration() {
    Configuration conf = new HdfsConfiguration();
    conf.addResource(FedBalance.FED_BALANCE_DEFAULT_XML);
    conf.addResource(FedBalance.FED_BALANCE_SITE_XML);
    return conf;
}
// After a namespace is disabled and its membership record expires, the running
// router must prune it from the RouterStateIdContext (2 entries -> 1).
@Test
public void testClearStaleNamespacesInRouterStateIdContext() throws Exception {
    Router testRouter = new Router();
    Configuration routerConfig = DFSRouter.getConfiguration();
    // Short membership expiration so the disabled namespace goes stale quickly.
    routerConfig.set(FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS, "2000");
    routerConfig.set(RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE, "false");
    // Mock resolver classes
    routerConfig.setClass(RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS,
        MockResolver.class, ActiveNamenodeResolver.class);
    routerConfig.setClass(RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
        MockResolver.class, FileSubclusterResolver.class);
    testRouter.init(routerConfig);
    String nsID1 = "ns0";
    String nsID2 = "ns1";
    MockResolver resolver = (MockResolver)testRouter.getNamenodeResolver();
    resolver.registerNamenode(createNamenodeReport(nsID1, "nn1", HAServiceProtocol.HAServiceState.ACTIVE));
    resolver.registerNamenode(createNamenodeReport(nsID2, "nn1", HAServiceProtocol.HAServiceState.ACTIVE));
    RouterRpcServer rpcServer = testRouter.getRpcServer();
    rpcServer.getRouterStateIdContext().getNamespaceStateId(nsID1);
    rpcServer.getRouterStateIdContext().getNamespaceStateId(nsID2);
    resolver.disableNamespace(nsID1);
    // Wait past the 2s membership expiration.
    Thread.sleep(3000);
    // Before the router starts, both namespaces are still tracked.
    RouterStateIdContext context = rpcServer.getRouterStateIdContext();
    assertEquals(2, context.getNamespaceIdMap().size());
    testRouter.start();
    Thread.sleep(3000); // wait clear stale namespaces
    RouterStateIdContext routerStateIdContext = rpcServer.getRouterStateIdContext();
    int size = routerStateIdContext.getNamespaceIdMap().size();
    assertEquals(1, size);
    rpcServer.stop();
    rpcServer.close();
    testRouter.close();
}
/**
 * Returns the timeline instants that may conflict with {@code currentInstant}.
 * Table-service operations (clustering or compaction) use a different candidate
 * set than regular commits.
 *
 * NOTE(review): {@code lastSuccessfulInstant} is unused here — confirm whether
 * that is intentional for this strategy.
 */
@Override
public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant,
    Option<HoodieInstant> lastSuccessfulInstant) {
    HoodieActiveTimeline activeTimeline = metaClient.reloadActiveTimeline();
    if (ClusteringUtils.isClusteringInstant(activeTimeline, currentInstant)
        || COMPACTION_ACTION.equals(currentInstant.getAction())) {
        return getCandidateInstantsForTableServicesCommits(activeTimeline, currentInstant);
    } else {
        return getCandidateInstantsForNonTableServicesCommits(activeTimeline, currentInstant);
    }
}
@Test public void testConcurrentWritesWithInterleavingSuccessfulCompaction() throws Exception { createCommit(metaClient.createNewInstantTime(), metaClient); HoodieActiveTimeline timeline = metaClient.getActiveTimeline(); // consider commits before this are all successful Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant(); // writer 1 starts String currentWriterInstant = metaClient.createNewInstantTime(); createInflightCommit(currentWriterInstant, metaClient); // compaction 1 gets scheduled and finishes String newInstantTime = metaClient.createNewInstantTime(); // TODO: Remove sleep stmt once the modified times issue is fixed. // Sleep thread for at least 1sec for consecutive commits that way they do not have two commits modified times falls on the same millisecond. Thread.sleep(1000); createCompaction(newInstantTime, metaClient); Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant)); PreferWriterConflictResolutionStrategy strategy = new PreferWriterConflictResolutionStrategy(); HoodieCommitMetadata currentMetadata = createCommitMetadata(currentWriterInstant); List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect( Collectors.toList()); // writer 1 conflicts with compaction 1 Assertions.assertEquals(1, candidateInstants.size()); Assertions.assertEquals(newInstantTime, candidateInstants.get(0).getTimestamp()); ConcurrentOperation thatCommitOperation = new ConcurrentOperation(candidateInstants.get(0), metaClient); ConcurrentOperation thisCommitOperation = new ConcurrentOperation(currentInstant.get(), currentMetadata); Assertions.assertTrue(strategy.hasConflict(thisCommitOperation, thatCommitOperation)); try { strategy.resolveConflict(null, thisCommitOperation, thatCommitOperation); Assertions.fail("Cannot reach here, should have 
thrown a conflict"); } catch (HoodieWriteConflictException e) { // expected } }
/**
 * Returns an aggregate operation producing the minimum item according to
 * {@code comparator}. Implemented by delegating to {@code maxBy} with the
 * comparator reversed, so both share one implementation.
 *
 * @param comparator the comparator to use; must be serializable
 */
@Nonnull
public static <T> AggregateOperation1<T, MutableReference<T>, T> minBy(
        @Nonnull ComparatorEx<? super T> comparator
) {
    checkSerializable(comparator, "comparator");
    return maxBy(comparator.reversed());
}
// minBy over natural order must keep the smallest value seen (10) through
// accumulate / combine / export / finish.
@Test
public void when_minBy() {
    validateOpWithoutDeduct(minBy(naturalOrder()), MutableReference::get, 10L, 11L, 10L, 10L, 10L);
}
/**
 * Returns the file path of the code base (e.g. jar or classes directory)
 * that {@code cls} was loaded from, or {@code null} when it cannot be
 * determined. Bootstrap classes such as {@code java.lang.String} typically
 * have no code source, so they yield {@code null}.
 *
 * @param cls the class to inspect; may be {@code null}
 * @return the code-base file path, or {@code null} if unavailable
 */
public static String getCodeBase(Class<?> cls) {
    // Walk class -> protection domain -> code source -> location, bailing
    // out with null as soon as any link in the chain is missing.
    if (cls == null) {
        return null;
    }
    final ProtectionDomain protectionDomain = cls.getProtectionDomain();
    if (protectionDomain == null) {
        return null;
    }
    final CodeSource codeSource = protectionDomain.getCodeSource();
    if (codeSource == null) {
        return null;
    }
    final URL codeLocation = codeSource.getLocation();
    return codeLocation == null ? null : codeLocation.getFile();
}
// null input and bootstrap classes (which have no code source) yield null;
// a class loaded from the application classpath resolves to a code base.
@Test
void testGetCodeBase() {
    assertNull(ReflectUtils.getCodeBase(null));
    assertNull(ReflectUtils.getCodeBase(String.class));
    assertNotNull(ReflectUtils.getCodeBase(ReflectUtils.class));
}
/**
 * Builds the search query that executes the configured aggregation: a pivot
 * (with per-series metrics) wrapped in date-range buckets of the
 * searchWithin window, optionally grouped by the configured fields, plus any
 * additional caller-supplied search types and an optional stream filter.
 *
 * @param parameters     processor parameters supplying the time range and streams
 * @param searchWithinMs width of each date-range bucket
 * @param executeEveryMs scheduling interval; smaller than searchWithinMs produces
 *                       overlapping (sliding-window) buckets
 */
protected Query getAggregationQuery(AggregationEventProcessorParameters parameters, long searchWithinMs, long executeEveryMs) {
    final Pivot.Builder pivotBuilder = Pivot.builder()
            .id(PIVOT_ID)
            .rollup(true);

    // Give each series a deterministic id derived from its metric name.
    final ImmutableList<SeriesSpec> series = config.series()
            .stream()
            .map(s -> s.withId(metricName(s)))
            .collect(ImmutableList.toImmutableList());
    if (!series.isEmpty()) {
        pivotBuilder.series(series);
    }

    // Wrap every aggregation with date range buckets of the searchWithin time range.
    // If the aggregation is configured to be using a sliding window (searchWithin > executeEveryMs)
    // the time ranges will overlap.
    // This allows us to run aggregations over larger time ranges than the searchWithin time.
    // The results will be received in time buckets of the searchWithin time size.
    final DateRangeBucket dateRangeBucket = buildDateRangeBuckets(parameters.timerange(), searchWithinMs, executeEveryMs);
    final List<BucketSpec> groupBy = new ArrayList<>();

    // The first bucket must be the date range!
    groupBy.add(dateRangeBucket);

    if (!config.groupBy().isEmpty()) {
        final Values values = Values.builder().fields(config.groupBy())
                .limit(Integer.MAX_VALUE)
                .build();
        groupBy.add(values);
        // The pivot search type (as of Graylog 3.1.0) is using the "terms" aggregation under
        // the hood. The "terms" aggregation is meant to return the "top" terms and does not allow
        // and efficient retrieval and pagination over all terms.
        // Using Integer.MAX_VALUE as a limit can be very expensive with high cardinality grouping.
        // The ES documentation recommends to use the "Composite" aggregation instead.
        //
        // See the ES documentation for more details:
        //   https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-size
        //   https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html
        //
        // The "Composite" aggregation is only available since ES version 6.1, unfortunately.
        //
        // TODO: Either find a way to use the composite aggregation when the ES version in use is
        //       recent enough, and/or use a more conservative limit here and make it configurable
        //       by the user.
    }

    // We always have row groups because of the date range buckets.
    pivotBuilder.rowGroups(groupBy);

    final Set<SearchType> searchTypes = Sets.newHashSet(pivotBuilder.build());
    searchTypes.addAll(additionalSearchTypes);

    final Query.Builder queryBuilder = Query.builder()
            .id(QUERY_ID)
            .searchTypes(searchTypes)
            .query(decorateQuery(config))
            .timerange(parameters.timerange());

    // Restrict to the requested streams when any are given.
    final Set<String> streams = getStreams(parameters);
    if (!streams.isEmpty()) {
        queryBuilder.filter(filteringForStreamIds(streams));
    }

    return queryBuilder.build();
}
// Verifies that search types handed to PivotAggregationSearch in addition to
// the aggregation pivot are carried through into the generated query.
@Test
public void testAdditionalSearchTypes() {
    final long WINDOW_LENGTH = 30000;
    final AbsoluteRange timerange = AbsoluteRange.create(DateTime.now(DateTimeZone.UTC).minusSeconds(3600), DateTime.now(DateTimeZone.UTC));
    var seriesCount = Count.builder().field("source").build();
    var seriesCard = Cardinality.builder().field("source").build();
    // Minimal aggregation config: two series, no grouping, no conditions.
    final AggregationEventProcessorConfig config = AggregationEventProcessorConfig.builder()
            .query("source:foo")
            .queryParameters(ImmutableSet.of())
            .streams(Collections.emptySet())
            .groupBy(Collections.emptyList())
            .series(ImmutableList.of(seriesCount, seriesCard))
            .conditions(null)
            .searchWithinMs(WINDOW_LENGTH)
            .executeEveryMs(WINDOW_LENGTH)
            .build();
    final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder()
            .streams(Collections.emptySet())
            .timerange(timerange)
            .batchSize(500)
            .build();
    // The extra "risk-asset-1" pivot is the additional search type under test.
    final PivotAggregationSearch pivotAggregationSearch = new PivotAggregationSearch(
            config,
            parameters,
            new AggregationSearch.User("test", DateTimeZone.UTC),
            eventDefinition,
            List.of(Pivot.builder()
                    .id("risk-asset-1")
                    .rowGroups(Values.builder().limit(10).field("Field").build())
                    .rollup(false)
                    .series(Count.builder().build())
                    .build()),
            searchJobService,
            queryEngine,
            EventsConfigurationTestProvider.create(),
            moreSearch,
            new PermittedStreams(() -> Stream.of("00001")),
            notificationService,
            new QueryStringDecorators(Optional.empty())
    );
    final Query query = pivotAggregationSearch.getAggregationQuery(parameters, WINDOW_LENGTH, WINDOW_LENGTH);
    // The generated query must contain the additional pivot verbatim.
    Assertions.assertThatCollection(query.searchTypes()).contains(
            Pivot.builder()
                    .id("risk-asset-1")
                    .rowGroups(Values.builder().limit(10).field("Field").build())
                    .rollup(false)
                    .series(Count.builder().build())
                    .build());
}
/**
 * Writes the scanner analysis log file as UTF-8, one section at a time:
 * plugins, bundled analyzers, global settings, project settings, and
 * per-module settings.
 *
 * @param writer provides the target file location via its file structure
 * @throws IllegalStateException if the log file cannot be written
 */
public void init(ScannerReportWriter writer) {
    File analysisLog = writer.getFileStructure().analysisLog();
    try (BufferedWriter fileWriter = Files.newBufferedWriter(analysisLog.toPath(), StandardCharsets.UTF_8)) {
        // Sections are written in a fixed order into the same writer.
        writePlugins(fileWriter);
        writeBundledAnalyzers(fileWriter);
        writeGlobalSettings(fileWriter);
        writeProjectSettings(fileWriter);
        writeModulesSettings(fileWriter);
    } catch (IOException e) {
        throw new IllegalStateException("Unable to write analysis log", e);
    }
}
// Verifies that sensitive module properties (login, password, *.secured)
// are masked in the analysis log while plain properties are dumped as-is.
// Fix: the original set "sonar.projectKey" twice with the same value; the
// redundant duplicate call is removed.
@Test
public void shouldNotDumpSensitiveModuleProperties() throws Exception {
    DefaultInputModule rootModule = new DefaultInputModule(ProjectDefinition.create()
            .setBaseDir(temp.newFolder())
            .setWorkDir(temp.newFolder())
            .setProperty("sonar.projectKey", "foo")
            .setProperty("sonar.login", "my_token")
            .setProperty("sonar.password", "azerty")
            .setProperty("sonar.cpp.license.secured", "AZERTY"));
    when(store.allModules()).thenReturn(singletonList(rootModule));
    when(hierarchy.root()).thenReturn(rootModule);

    publisher.init(writer);

    assertThat(writer.getFileStructure().analysisLog()).exists();
    // Sensitive values must be masked; the project key stays readable.
    assertThat(FileUtils.readFileToString(writer.getFileStructure().analysisLog(), StandardCharsets.UTF_8)).containsSubsequence(
            "sonar.cpp.license.secured=******",
            "sonar.login=******",
            "sonar.password=******",
            "sonar.projectKey=foo");
}
/**
 * Decides whether a user may view the given template: either the user (or
 * one of their roles) is an explicit view user of the template, or the user
 * is a group administrator and the template allows group admins access.
 */
public boolean hasViewAccessToTemplate(PipelineTemplateConfig template, CaseInsensitiveString username, List<Role> roles, boolean isGroupAdministrator) {
    // An explicit view permission on the template wins outright.
    if (template.getAuthorization().isViewUser(username, roles)) {
        return true;
    }
    // Otherwise group admins may view, but only when the template opts in.
    return template.isAllowGroupAdmins() && isGroupAdministrator;
}
// A group administrator must NOT get view access when the template has
// allowGroupAdmins switched off, even though isGroupAdministrator is true.
// NOTE(review): the method name reads oddly for this scenario — it asserts
// false when group-admin access is disabled; confirm naming intent.
@Test
public void shouldReturnFalseIfGroupAdminCanViewTemplate() {
    CaseInsensitiveString templateViewUser = new CaseInsensitiveString("view");
    String templateName = "template";
    PipelineTemplateConfig template = PipelineTemplateConfigMother.createTemplate(templateName, StageConfigMother.manualStage("stage"));
    template.getAuthorization().setAllowGroupAdmins(false);
    TemplatesConfig templates = new TemplatesConfig(template);
    assertThat(templates.hasViewAccessToTemplate(template, templateViewUser, null, true), is(false));
}
/**
 * Returns the context path the application is mounted at; exposed as a
 * JSON property for (de)serialization of this configuration.
 */
@JsonProperty
public String getApplicationContextPath() {
    return applicationContextPath;
}
// The configured application context path is returned unchanged.
@Test
void testGetApplicationContext() {
    assertThat(http.getApplicationContextPath()).isEqualTo("/service");
}
/**
 * Converts a TDWay (with optional inner ways) into a JTS geometry:
 * a multi-line string when the way is forced to be a polygon-line, a
 * polygon (possibly with holes, repaired if invalid) when the way is
 * closed or inner ways are present, and a plain line string otherwise.
 *
 * @param way       the outer way; returns null (with a warning) when null
 * @param innerWays optional inner ways forming holes; may be null or empty
 * @return the constructed geometry, or null if {@code way} is null
 */
public static Geometry toJtsGeometry(TDWay way, List<TDWay> innerWays) {
    if (way == null) {
        LOGGER.warning("way is null");
        return null;
    }

    if (way.isForcePolygonLine()) {
        // may build a single line string if inner ways are empty
        return buildMultiLineString(way, innerWays);
    }

    if (way.getShape() != TDWay.LINE || innerWays != null && innerWays.size() > 0) {
        // Have to be careful here about polygons and lines again, the problem with
        // polygons is that a certain direction is forced, so we do not want to reverse
        // closed lines that are not meant to be polygons
        // may contain holes if inner ways are not empty
        Polygon polygon = buildPolygon(way, innerWays);
        if (polygon.isValid()) {
            return polygon;
        }
        // Attempt to fix self-intersections etc. rather than dropping the shape.
        return repairInvalidPolygon(polygon);
    }
    // not a closed line
    return buildLineString(way);
}
// Builds a geometry from the "valid-polygon.wkt" fixture and checks validity.
// NOTE(review): the assertion expects a LineString, not a Polygon —
// presumably the fixture ways are flagged force-polygon-line; confirm
// against MockingUtils.wktPolygonToWays.
@Test
public void testBuildGeometryFromValidPolygon() {
    String testfile = "valid-polygon.wkt";
    List<TDWay> ways = MockingUtils.wktPolygonToWays(testfile);
    // First way is the outer ring; the rest are inner ways.
    Geometry geometry = JTSUtils.toJtsGeometry(ways.get(0), ways.subList(1, ways.size()));
    Assert.isTrue(geometry instanceof LineString);
    Assert.isTrue(geometry.isValid());
}
/**
 * If a RocksDB config setter class is configured in the streams properties
 * and it implements Kafka's {@code Configurable} interface, instantiate it
 * and pass the full KSQL config originals to its {@code configure} method.
 * Non-Configurable setters are left alone (Streams will instantiate them
 * itself later).
 *
 * @throws ConfigException if instantiation or configuration fails
 */
public static void maybeConfigureRocksDBConfigSetter(final KsqlConfig ksqlConfig) {
    final Map<String, Object> streamsProps = ksqlConfig.getKsqlStreamConfigProps();
    final Class<?> clazz =
        (Class) streamsProps.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (clazz != null && org.apache.kafka.common.Configurable.class.isAssignableFrom(clazz)) {
        try {
            // Eagerly configure so config errors surface at startup.
            ((org.apache.kafka.common.Configurable) Utils.newInstance(clazz))
                .configure(ksqlConfig.originals());
        } catch (final Exception e) {
            throw new ConfigException(
                "Failed to configure Configurable RocksDBConfigSetter. "
                    + StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG
                    + ": "
                    + clazz.getName(),
                e);
        }
    }
}
// A config setter class that does NOT implement Configurable must be
// skipped silently — the handler should not attempt to configure it.
@Test
public void shouldStartWithNonConfigurableRocksDBConfigSetter() throws Exception {
    // Given:
    when(ksqlConfig.getKsqlStreamConfigProps()).thenReturn(
        ImmutableMap.of(
            StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG,
            Class.forName("io.confluent.ksql.rest.util.RocksDBConfigSetterHandlerTest$NonConfigurableTestRocksDBConfigSetter"))
    );

    // No error when:
    RocksDBConfigSetterHandler.maybeConfigureRocksDBConfigSetter(ksqlConfig);
}
/**
 * Best-effort shutdown of all socket connections held by the given
 * Hazelcast instance. A {@code null} instance is simply ignored.
 */
public static void tryCloseConnections(HazelcastInstance hazelcastInstance) {
    if (hazelcastInstance != null) {
        // The public interface is backed by HazelcastInstanceImpl here.
        closeSockets((HazelcastInstanceImpl) hazelcastInstance);
    }
}
// Smoke test: closing connections on a live instance must not throw.
// NOTE(review): there are no assertions — success is simply "no exception".
@Test
public void testTryCloseConnections() {
    tryCloseConnections(hazelcastInstance);
}
/**
 * Updates a combination (group-buy) activity and its products in one
 * transaction. Validation order: existence, status, product conflict,
 * product existence — any failure rolls the whole update back.
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void updateCombinationActivity(CombinationActivityUpdateReqVO updateReqVO) {
    // Validate the activity exists
    CombinationActivityDO activityDO = validateCombinationActivityExists(updateReqVO.getId());
    // Validate status: a disabled activity must not be updated
    if (ObjectUtil.equal(activityDO.getStatus(), CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(COMBINATION_ACTIVITY_STATUS_DISABLE_NOT_UPDATE);
    }
    // Validate the SPU is not already used by another activity
    validateProductConflict(updateReqVO.getSpuId(), updateReqVO.getId());
    // Validate the products exist
    validateProductExists(updateReqVO.getSpuId(), updateReqVO.getProducts());
    // Update the activity
    CombinationActivityDO updateObj = CombinationActivityConvert.INSTANCE.convert(updateReqVO);
    combinationActivityMapper.updateById(updateObj);
    // Update the products
    updateCombinationProduct(updateObj, updateReqVO.getProducts());
}
// Updating an activity whose id does not exist must fail with
// COMBINATION_ACTIVITY_NOT_EXISTS.
@Test
public void testUpdateCombinationActivity_notExists() {
    // Prepare parameters (random id, so the activity does not exist)
    CombinationActivityUpdateReqVO reqVO = randomPojo(CombinationActivityUpdateReqVO.class);

    // Call and assert the expected service exception
    assertServiceException(() -> combinationActivityService.updateCombinationActivity(reqVO),
        COMBINATION_ACTIVITY_NOT_EXISTS);
}
/**
 * Prepares and sends fetch requests to the relevant nodes, registering
 * success/failure callbacks that re-synchronize on this Fetcher before
 * handling the response (the outer method is synchronized too, so callback
 * handling never races with other fetcher operations).
 *
 * @return the number of fetch requests that were prepared/sent
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                // Callbacks run on the network thread; lock before mutating state.
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
// Verifies that the leader epoch learned from a metadata update (epoch 99)
// is propagated into the outgoing FetchRequest's per-partition data.
@Test
public void testEpochSetInFetchRequest() {
    buildFetcher();
    subscriptions.assignFromUser(singleton(tp0));
    // Metadata update with a fixed leader epoch of 99 for all partitions.
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap(topicName, 4), tp -> 99, topicIds);
    client.updateMetadata(metadataResponse);

    subscriptions.seek(tp0, 10);
    assertEquals(1, sendFetches());

    // Check for epoch in outgoing request
    MockClient.RequestMatcher matcher = body -> {
        if (body instanceof FetchRequest) {
            FetchRequest fetchRequest = (FetchRequest) body;
            fetchRequest.fetchData(topicNames).values().forEach(partitionData -> {
                assertTrue(partitionData.currentLeaderEpoch.isPresent(), "Expected Fetcher to set leader epoch in request");
                assertEquals(99, partitionData.currentLeaderEpoch.get().longValue(), "Expected leader epoch to match epoch from metadata update");
            });
            return true;
        } else {
            fail("Should have seen FetchRequest");
            return false;
        }
    };
    client.prepareResponse(matcher, fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.pollNoWakeup();
    // All in-flight requests should have been drained by the poll.
    assertEquals(0, consumerClient.pendingRequestCount());
}
@VisibleForTesting void checkRemoteFilenameField( String remoteFilenameFieldName, SFTPPutData data ) throws KettleStepException { remoteFilenameFieldName = environmentSubstitute( remoteFilenameFieldName ); if ( !Utils.isEmpty( remoteFilenameFieldName ) ) { data.indexOfRemoteFilename = getInputRowMeta().indexOfValue( remoteFilenameFieldName ); if ( data.indexOfRemoteFilename == -1 ) { // remote file name field is set, but was not found throw new KettleStepException( BaseMessages.getString( PKG, "SFTPPut.Error.CanNotFindField", remoteFilenameFieldName ) ); } } }
// A blank field name must be a no-op: no exception, index stays at -1.
// NOTE(review): this asserts data.indexOfSourceFileFieldName, while the
// method under test writes data.indexOfRemoteFilename — confirm whether the
// assertion targets the intended field.
@Test
public void checkRemoteFilenameField_FieldNameIsBlank() throws Exception {
    SFTPPutData data = new SFTPPutData();
    step.checkRemoteFilenameField( "", data );
    assertEquals( -1, data.indexOfSourceFileFieldName );
}
/**
 * {@inheritDoc}
 * Only column 1 is supported by this metadata; the value is never a
 * currency, so this always returns {@code false}.
 *
 * @throws IllegalArgumentException if {@code column} is not 1
 */
@Override
public boolean isCurrency(final int column) {
    Preconditions.checkArgument(1 == column);
    return false;
}
// Column 1 is valid and must report non-currency.
@Test
void assertIsCurrency() throws SQLException {
    assertFalse(actualMetaData.isCurrency(1));
}
/**
 * Decodes a raw NetFlow datagram into messages. The first payload byte is a
 * marker: the pass-through marker selects NetFlow v5 parsing; the ordered-v9
 * marker selects the custom v9 format that carries its templates in the same
 * packet; anything else is unsupported and logged. Returns {@code null} for
 * undersized, unsupported, or unparseable payloads (errors are logged, not
 * thrown).
 */
@Nullable
@Override
public Collection<Message> decodeMessages(@Nonnull RawMessage rawMessage) {
    try {
        final ResolvableInetSocketAddress remoteAddress = rawMessage.getRemoteAddress();
        final InetSocketAddress sender = remoteAddress != null ? remoteAddress.getInetSocketAddress() : null;

        final byte[] payload = rawMessage.getPayload();
        // Need at least the marker byte plus a 2-byte version field.
        if (payload.length < 3) {
            LOG.debug("NetFlow message (source: {}) doesn't even fit the NetFlow version (size: {} bytes)",
                    sender, payload.length);
            return null;
        }

        final ByteBuf buffer = Unpooled.wrappedBuffer(payload);
        switch (buffer.readByte()) {
            case PASSTHROUGH_MARKER:
                // Plain NetFlow v5: one message per flow record.
                final NetFlowV5Packet netFlowV5Packet = NetFlowV5Parser.parsePacket(buffer);
                return netFlowV5Packet.records().stream()
                        .map(record -> netFlowFormatter.toMessage(netFlowV5Packet.header(), record, sender))
                        .collect(Collectors.toList());
            case ORDERED_V9_MARKER:
                // our "custom" netflow v9 that has all the templates in the same packet
                return decodeV9(sender, buffer);
            default:
                // Identify the input for the log line from the last source node, if any.
                final List<RawMessage.SourceNode> sourceNodes = rawMessage.getSourceNodes();
                final RawMessage.SourceNode sourceNode = sourceNodes.isEmpty() ? null : sourceNodes.get(sourceNodes.size() - 1);
                final String inputId = sourceNode == null ? "<unknown>" : sourceNode.inputId;
                LOG.warn("Unsupported NetFlow packet on input {} (source: {})", inputId, sender);
                return null;
        }
    } catch (FlowException e) {
        LOG.error("Error parsing NetFlow packet <{}> received from <{}>", rawMessage.getId(), rawMessage.getRemoteAddress(), e);
        if (LOG.isDebugEnabled()) {
            LOG.debug("NetFlow packet hexdump:\n{}", ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(rawMessage.getPayload())));
        }
        return null;
    } catch (InvalidProtocolBufferException e) {
        LOG.error("Invalid NetFlowV9 entry found, cannot parse the messages", ExceptionUtils.getRootCause(e));
        return null;
    }
}
// An arbitrary non-NetFlow payload ("Foobar") must be rejected gracefully:
// decodeMessages returns null instead of throwing.
@Test
public void decodeMessagesReturnsNullIfMessageWasInvalid() throws Exception {
    final byte[] b = "Foobar".getBytes(StandardCharsets.UTF_8);
    final InetSocketAddress source = new InetSocketAddress(InetAddress.getLocalHost(), 12345);
    final RawMessage rawMessage = new RawMessage(b, source);

    final Collection<Message> messages = codec.decodeMessages(rawMessage);
    assertThat(messages).isNull();
}
/**
 * Floods the packet from {@code context} out of every edge port on the
 * network except the port the packet originally arrived on.
 */
@Override
public void flood(NeighbourMessageContext context) {
    Tools.stream(edgeService.getEdgePoints()).forEach(cp -> {
        // Never echo the packet back out of its ingress port.
        if (!cp.equals(context.inPort())) {
            sendTo(context.packet(), cp);
        }
    });
}
// Flooding an ARP request arrived on CP1 must emit it on every edge port
// except CP1 itself, exactly once per port.
@Test
public void flood() {
    Ethernet request = NeighbourTestUtils.createArpRequest(IP1);

    // Expect the packet to be emitted out all ports apart from the in port
    Sets.difference(Sets.newLinkedHashSet(EDGE_PORTS), Collections.singleton(CP1))
            .forEach(cp -> {
                packetService.emit(outbound(request, cp));
                expectLastCall().once();
            });

    replay(packetService);
    actions.flood(createContext(request, CP1, null));
    verify(packetService);
}
/**
 * Returns {@code true} when the resource name is blacklisted, i.e. it
 * matches the blacklist regular expression. Note that
 * {@code String.matches} anchors the pattern to the entire name.
 */
@Override
public boolean test(final String resourceName) {
    return resourceName.matches(blackList);
}
// When the blacklist file is missing, the Blacklist must fail open:
// no class name is treated as blacklisted.
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
@SuppressWarnings("ResultOfMethodCallIgnored")
@Test
public void shouldNotBlacklistAnythingIfFailsToLoadFile() {
    // Remove the file so construction has nothing to load.
    blacklistFile.delete();

    final Blacklist blacklist = new Blacklist(this.blacklistFile);

    assertFalse(blacklist.test("java.lang.Process"));
    assertFalse(blacklist.test("java.util.List"));
    assertFalse(blacklist.test("java.lang.ProcessEnvironment"));
    assertFalse(blacklist.test("java.lang.Class"));
}
/**
 * Packs the serialization type code into the top byte of the header value,
 * keeping only the low 24 bits of {@code source}.
 *
 * @param source the original header value; only bits 0-23 are retained
 * @param type   the serialization type whose code occupies bits 24-31
 * @return the combined 32-bit protocol header value
 */
public static int markProtocolType(int source, SerializeType type) {
    final int typeBits = type.getCode() << 24;
    final int sourceBits = source & 0x00FFFFFF;
    return typeBits | sourceBits;
}
// source = 0x00FFFFFF with the ROCKETMQ type (code 1) must produce bytes
// {0x01, 0xFF, 0xFF, 0xFF}: type code in the high byte, source in the low 24 bits.
@Test
public void testMarkProtocolType_ROCKETMQProtocolType() {
    int source = 16777215;
    SerializeType type = SerializeType.ROCKETMQ;
    byte[] result = new byte[4];

    int x = RemotingCommand.markProtocolType(source, type);
    // Split the packed int back into big-endian bytes for comparison.
    result[0] = (byte) (x >> 24);
    result[1] = (byte) (x >> 16);
    result[2] = (byte) (x >> 8);
    result[3] = (byte) x;
    assertThat(result).isEqualTo(new byte[] {1, -1, -1, -1});
}
/**
 * Downloads the resource at {@code url} and returns its raw bytes.
 * Thin convenience delegate to {@code HttpDownloader.downloadBytes}.
 */
public static byte[] downloadBytes(String url) {
    return HttpDownloader.downloadBytes(url);
}
// Manual/network test (hence @Disabled): downloads an image from Baidu's
// image proxy and prints it Base64-encoded for inspection.
@Test
@Disabled
public void gimg2Test(){
    final byte[] bytes = HttpUtil.downloadBytes("https://gimg2.baidu.com/image_search/src=http%3A%2F%2Fpic.jj20.com%2Fup%2Fallimg%2F1114%2F0H320120Z3%2F200H3120Z3-6-1200.jpg&refer=http%3A%2F%2Fpic.jj20.com&app=2002&size=f9999,10000&q=a80&n=0&g=0n&fmt=jpeg?sec=1621996490&t=8c384c2823ea453da15a1b9cd5183eea");
    Console.log(Base64.encode(bytes));
}
/**
 * Returns {@code true} when the current thread-local request has a
 * Content-Type header containing {@code text}; {@code false} when the
 * header is absent.
 */
public boolean typeContains(String text) {
    final String contentType = LOCAL_REQUEST.get().getContentType();
    if (contentType == null) {
        // No Content-Type header on the request: nothing can match.
        return false;
    }
    return contentType.contains(text);
}
// A scenario guarded by typeContains('json') must fire for a request whose
// Content-Type is application/json.
@Test
void testTypeContains() {
    background().scenario(
        "pathMatches('/hello') && typeContains('json')",
        "def response = { success: true }"
    );
    request.path("/hello").contentType("application/json").method("GET");
    handle();
    match(response.getBodyConverted(), "{ success: true }");
}
/**
 * Escapes {@code content} using the default JavaScript escape filter
 * (delegates to the two-argument overload with {@code JS_ESCAPE_FILTER}).
 */
public static String escape(CharSequence content) {
    return escape(content, JS_ESCAPE_FILTER);
}
// Round-trip: safe ASCII characters pass through unescaped, others (parens,
// CJK) become %XX / %uXXXX sequences, and unescape restores the original.
@Test
public void escapeTest(){
    String str = "*@-_+./(123你好)ABCabc";
    String escape = EscapeUtil.escape(str);
    assertEquals("*@-_+./%28123%u4f60%u597d%29ABCabc", escape);

    String unescape = EscapeUtil.unescape(escape);
    assertEquals(str, unescape);
}