file_name
stringlengths
6
86
file_path
stringlengths
45
249
content
stringlengths
47
6.26M
file_size
int64
47
6.26M
language
stringclasses
1 value
extension
stringclasses
1 value
repo_name
stringclasses
767 values
repo_stars
int64
8
14.4k
repo_forks
int64
0
1.17k
repo_open_issues
int64
0
788
repo_created_at
stringclasses
767 values
repo_pushed_at
stringclasses
767 values
TopicConfigEntryDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/TopicConfigEntryDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;

/**
 * Read-only DTO representing a single Kafka topic configuration entry as exposed by the REST API.
 * <p>
 * All values are set once via the constructor; Lombok generates the getters. Fields are {@code final} to make the
 * immutability explicit.
 */
@JsonSerialize
@Getter
public class TopicConfigEntryDto {

    /** Name of the configuration entry (e.g. a Kafka topic config key). */
    private final String name;

    /** Current value of the configuration entry. */
    private final String value;

    /** {@code true} if the value is the broker/cluster default rather than an explicit override. */
    private final boolean isDefault;

    /** {@code true} if this entry cannot be modified. */
    private final boolean readOnly;

    /** {@code true} if the value is sensitive and should not be displayed. */
    private final boolean sensitive;

    /**
     * Creates a new configuration entry DTO.
     *
     * @param name      name of the configuration entry.
     * @param value     value of the configuration entry.
     * @param isDefault whether the value is a default value.
     * @param readOnly  whether the entry is read-only.
     * @param sensitive whether the value is sensitive.
     */
    public TopicConfigEntryDto(String name, String value, boolean isDefault, boolean readOnly, boolean sensitive) {
        this.name = name;
        this.value = value;
        this.isDefault = isDefault;
        this.readOnly = readOnly;
        this.sensitive = sensitive;
    }
}
637
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/TopicDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import com.hermesworld.ais.galapagos.topics.Criticality;
import com.hermesworld.ais.galapagos.topics.MessagesPerDay;
import com.hermesworld.ais.galapagos.topics.MessagesSize;
import lombok.Getter;

import java.util.List;

/**
 * Read-only DTO representing a Kafka topic on a given environment, as exposed by the REST API.
 * <p>
 * Instances are immutable: all fields are {@code final}, Lombok generates the getters, and the producers list is
 * defensively copied into an unmodifiable snapshot.
 */
@Getter
public class TopicDto {

    private final String name;

    private final String topicType;

    private final String environmentId;

    private final String description;

    private final String externalInterfaceUrl;

    private final String ownerApplicationId;

    /** {@code true} if the topic has been marked as deprecated. */
    private final boolean deprecated;

    /** Deprecation reasoning / hint text; only meaningful when {@link #deprecated} is set. */
    private final String deprecationText;

    /** End-of-life date (as string) until which a deprecated topic is guaranteed to exist. */
    private final String eolDate;

    private final boolean subscriptionApprovalRequired;

    private final boolean deletable;

    private final long compactionTimeMillis;

    private final long retentionTimeMillis;

    private final Criticality criticality;

    private final MessagesPerDay messagesPerDay;

    private final MessagesSize messagesSize;

    /** IDs of applications registered as (additional) producers for this topic; never {@code null}. */
    private final List<String> producers;

    public TopicDto(String name, String topicType, String environmentId, String description,
            String externalInterfaceUrl, String ownerApplicationId, boolean deprecated, String deprecationText,
            String eolDate, boolean subscriptionApprovalRequired, boolean deletable, long compactionTimeMillis,
            long retentionTimeMillis, Criticality criticality, MessagesPerDay messagesPerDay, MessagesSize messagesSize,
            List<String> producers) {
        this.name = name;
        this.topicType = topicType;
        this.environmentId = environmentId;
        this.description = description;
        this.externalInterfaceUrl = externalInterfaceUrl;
        this.ownerApplicationId = ownerApplicationId;
        this.deprecated = deprecated;
        this.deprecationText = deprecationText;
        this.eolDate = eolDate;
        this.subscriptionApprovalRequired = subscriptionApprovalRequired;
        this.deletable = deletable;
        this.compactionTimeMillis = compactionTimeMillis;
        this.retentionTimeMillis = retentionTimeMillis;
        this.criticality = criticality;
        this.messagesPerDay = messagesPerDay;
        this.messagesSize = messagesSize;
        // Defensive, unmodifiable snapshot. Guard against null to avoid an NPE from List.copyOf()
        // when a caller has no producers to report.
        this.producers = producers == null ? List.of() : List.copyOf(producers);
    }
}
2,304
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
UpdateTopicDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/UpdateTopicDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import lombok.Setter;

import java.time.LocalDate;

/**
 * Mutable request DTO for updating properties of an existing topic (deprecation text, EOL date, and optionally the
 * description).
 * <p>
 * Lombok generates getters and setters; the no-args constructor exists for JSON deserialization.
 */
@Getter
@Setter
@JsonSerialize
public class UpdateTopicDto {

    /** New deprecation text for the topic. */
    private String deprecationText;

    /** New description for the topic; only applied when {@link #updateDescription} is set. */
    private String description;

    /** Flag signalling that {@link #description} shall be applied. */
    private boolean updateDescription;

    /** End-of-life date for a deprecated topic. */
    private LocalDate eolDate;

    /** No-args constructor for deserialization frameworks. */
    public UpdateTopicDto() {
    }

    /**
     * Convenience constructor setting all properties at once.
     *
     * @param deprecationText   deprecation text for the topic.
     * @param eolDate           end-of-life date for the topic.
     * @param description       new description for the topic.
     * @param updateDescription whether the description shall be applied.
     */
    public UpdateTopicDto(String deprecationText, LocalDate eolDate, String description, boolean updateDescription) {
        this.updateDescription = updateDescription;
        this.description = description;
        this.eolDate = eolDate;
        this.deprecationText = deprecationText;
    }
}
736
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangeTopicOwnerDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/ChangeTopicOwnerDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import lombok.Getter;
import lombok.Setter;

import jakarta.validation.constraints.NotNull;

/**
 * Request DTO for transferring ownership of a topic to another application.
 * <p>
 * Lombok generates getter and setter; bean validation rejects requests without a producer application ID.
 */
@Getter
@Setter
public class ChangeTopicOwnerDto {

    /** ID of the application which shall become the new topic owner; must not be {@code null}. */
    @NotNull(message = "producer id cannot be null!")
    private String producerApplicationId;
}
300
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AddSchemaVersionDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/AddSchemaVersionDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import lombok.Getter;
import lombok.Setter;

/**
 * Request DTO for adding a new JSON schema version to a topic.
 * <p>
 * Lombok generates getters and setters for deserialization of the request body.
 */
@Getter
@Setter
public class AddSchemaVersionDto {

    /** The JSON schema to add, as raw JSON text. */
    private String jsonSchema;

    /** Free-text description of what changed compared to the previous schema version. */
    private String changeDescription;
}
228
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CreateTopicDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/CreateTopicDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import com.hermesworld.ais.galapagos.topics.Criticality;
import com.hermesworld.ais.galapagos.topics.MessagesPerDay;
import com.hermesworld.ais.galapagos.topics.MessagesSize;
import com.hermesworld.ais.galapagos.topics.TopicType;
import lombok.Getter;
import lombok.Setter;

import java.util.Map;

/**
 * Request DTO carrying all parameters for creating a new topic.
 * <p>
 * Lombok generates getters and setters for deserialization of the request body.
 */
@Getter
@Setter
public class CreateTopicDto {

    /** Name of the topic to create. */
    private String name;

    /** Type of the topic (e.g. events, data, commands). */
    private TopicType topicType;

    private String description;

    /** ID of the application which shall own the new topic. */
    private String ownerApplicationId;

    private boolean subscriptionApprovalRequired;

    /** Desired partition count; may be {@code null} to use a default. */
    private Integer partitionCount;

    /** Additional Kafka topic configuration entries (key/value). */
    private Map<String, String> topicConfig;

    private long compactionTimeMillis;

    private long retentionTimeMillis;

    private Criticality criticality;

    private MessagesPerDay messagesPerDay;

    private MessagesSize messagesSize;
}
872
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicNameSuggestionQueryDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/TopicNameSuggestionQueryDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import com.hermesworld.ais.galapagos.topics.TopicType;
import lombok.Getter;
import lombok.Setter;

/**
 * Query DTO for requesting a suggested topic name, based on topic type, application, environment, and business
 * capability.
 * <p>
 * Lombok generates getters and setters for deserialization.
 */
@Getter
@Setter
public class TopicNameSuggestionQueryDto {

    /** Type of the topic for which a name shall be suggested. */
    private TopicType topicType;

    /** ID of the application which shall own the topic. */
    private String applicationId;

    /** ID of the target environment. */
    private String environmentId;

    /** ID of the business capability to derive the name from. */
    private String businessCapabilityId;
}
366
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicNameDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/TopicNameDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import lombok.Getter;
import lombok.Setter;

/**
 * Simple DTO wrapping a single topic name, e.g. as the response of a name suggestion endpoint.
 * <p>
 * Lombok generates getter and setter.
 */
@Getter
@Setter
public class TopicNameDto {

    /** The topic name being transported. */
    private String name;

    /**
     * Creates the DTO with the given topic name.
     *
     * @param name topic name to wrap.
     */
    public TopicNameDto(String name) {
        this.name = name;
    }
}
248
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ConsumerRecordDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/ConsumerRecordDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;

/**
 * Read-only DTO representing a single consumed Kafka record (key, value, offset, timestamp, partition, and headers),
 * e.g. for "peek topic data" style endpoints.
 * <p>
 * Lombok generates the getters; the package-private constructor is intended for use by the controller layer only. The
 * headers map is defensively copied so later changes to the caller's map cannot leak into this DTO.
 */
@JsonSerialize
@Getter
public class ConsumerRecordDto {

    private String key;

    private String value;

    private long offset;

    /** Record timestamp, in epoch milliseconds — presumably as reported by Kafka; TODO confirm against caller. */
    private long timestamp;

    private int partition;

    /** Record headers as string key/value pairs; unmodifiable, never {@code null}. */
    private Map<String, String> headers;

    ConsumerRecordDto(String key, String value, long offset, long timestamp, int partition,
            Map<String, String> headers) {
        this.key = key;
        this.value = value;
        this.offset = offset;
        this.timestamp = timestamp;
        this.partition = partition;
        // Defensive copy wrapped unmodifiable; HashMap copy (instead of Map.copyOf) tolerates null header values.
        this.headers = headers == null ? Map.of() : Collections.unmodifiableMap(new HashMap<>(headers));
    }
}
732
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AddProducerDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/AddProducerDto.java
package com.hermesworld.ais.galapagos.topics.controller;

import lombok.Getter;
import lombok.Setter;

/**
 * Request DTO for registering an additional producer application on a topic.
 * <p>
 * Lombok generates getter and setter for deserialization of the request body.
 */
@Getter
@Setter
public class AddProducerDto {

    /** ID of the application to register as a producer. */
    private String producerApplicationId;
}
195
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/service/TopicService.java
package com.hermesworld.ais.galapagos.topics.service; import com.hermesworld.ais.galapagos.kafka.TopicCreateParams; import com.hermesworld.ais.galapagos.topics.SchemaCompatCheckMode; import com.hermesworld.ais.galapagos.topics.SchemaMetadata; import com.hermesworld.ais.galapagos.topics.TopicMetadata; import org.apache.kafka.clients.consumer.ConsumerRecord; import javax.annotation.CheckReturnValue; import java.time.LocalDate; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; public interface TopicService { @CheckReturnValue CompletableFuture<TopicMetadata> createTopic(String environmentId, TopicMetadata topic, Integer partitionCount, Map<String, String> topicConfig); @CheckReturnValue boolean canDeleteTopic(String environmentId, String topicName); @CheckReturnValue CompletableFuture<Void> deleteTopic(String environmentId, String topicName); @CheckReturnValue CompletableFuture<Void> updateTopicDescription(String environmentId, String topicName, String newDescription); /** * Marks the given topic as deprecated. Unlike most other operations, this is done automatically on <b>all</b> known * clusters (environments) where the topic exists. * * @param topicName Name of the topic. * @param deprecationText Deprecation text, e.g. a reasoning and a reference to an alternative topic. * @param eolDate EOL date until when the topic is guaranteed to exist. The topic can only be deleted before * this date when it has no more subscribers (standard topic deletion rules and order apply). * @return A future which completed once the topic has been deprecated on all known clusters, or which completes * exceptionally when the deprecation could not be performed successfully. Note that the topic may be * deprecated on <b>some</b> stages even if the future completes exceptionally. 
*/ @CheckReturnValue CompletableFuture<Void> markTopicDeprecated(String topicName, String deprecationText, LocalDate eolDate); /** * Removes the deprecation mark from the given topic. Unlike most other operations, this is done automatically on * <b>all</b> known clusters (environments) where the topic exists. * * @param topicName Name of the topic. * @return A future which completed once the topic has been undeprecated on all known clusters, or which completes * exceptionally when the deprecation removal could not be performed successfully. Note that the topic may * be deprecated on <b>some</b> stages even if the future completes exceptionally. */ @CheckReturnValue CompletableFuture<Void> unmarkTopicDeprecated(String topicName); /** * Updates the "subscriptionApprovalRequired" flag for a given topic on a given environment. This is a stageable * change, and can only be performed on non-"staging only" environments. <br> * If the flag is activated for a topic, all already existing subscriptions on that environment for the topic will * initially receive the <code>APPROVED</code> state. This can be updated using the * <code>SubscriptionService</code>. <br> * Once it is deactivated for a topic, all <code>PENDING</code> subscriptions for the topic on the given environment * will be converted to <code>APPROVED</code> ones (this is not done by the <code>TopicService</code>, but by the * class <code>SubscriptionTopicListener</code>). * * @param environmentId ID of the environment containing the topic. Must not be a "staging only" * environment. * @param topicName Name of the topic to update. * @param subscriptionApprovalRequired New flag for the topic. * @return A completable future which completes once all related changes are performed. 
*/ @CheckReturnValue CompletableFuture<Void> setSubscriptionApprovalRequiredFlag(String environmentId, String topicName, boolean subscriptionApprovalRequired); @CheckReturnValue List<TopicMetadata> listTopics(String environmentId); @CheckReturnValue Optional<TopicMetadata> getTopic(String environmentId, String topicName); @CheckReturnValue List<SchemaMetadata> getTopicSchemaVersions(String environmentId, String topicName); @CheckReturnValue Optional<SchemaMetadata> getSchemaById(String environmentId, String schemaId); @CheckReturnValue CompletableFuture<SchemaMetadata> addTopicSchemaVersion(String environmentId, String topicName, String jsonSchema, String changeDescription, SchemaCompatCheckMode skipCompatCheck); @CheckReturnValue CompletableFuture<Void> deleteLatestTopicSchemaVersion(String environmentId, String topicName); /** * Adds a new JSON schema version to this topic. This variant of this method takes a complete {@link SchemaMetadata} * object, which is usually used during staging only. This method checks that the preceding version of the schema * exists on the given environment, e.g. you can add a schema version #3 only if #2 is already existing. If this * condition is not fulfilled, a failed <code>CompletableFuture</code> (with an {@link IllegalStateException} as * cause) is returned. * * @param environmentId Environment ID to add the schema version to. * @param metadata Metadata of the schema to add, including topic name and schema version. * @return A Future which completes when the operation completes, or a failed future when the schema could not be * added. 
*/ @CheckReturnValue CompletableFuture<SchemaMetadata> addTopicSchemaVersion(String environmentId, SchemaMetadata metadata, SchemaCompatCheckMode skipCompatCheck); @CheckReturnValue CompletableFuture<TopicCreateParams> buildTopicCreateParams(String environmentId, String topicName); @CheckReturnValue CompletableFuture<List<ConsumerRecord<String, String>>> peekTopicData(String environmentId, String topicName, int limit); @CheckReturnValue CompletableFuture<Void> addTopicProducer(String environmentId, String topicName, String producerId); @CheckReturnValue CompletableFuture<Void> removeTopicProducer(String environmentId, String topicName, String producerId); @CheckReturnValue CompletableFuture<Void> changeTopicOwner(String environmentId, String topicName, String newApplicationOwnerId); }
6,634
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ValidatingTopicService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/service/ValidatingTopicService.java
package com.hermesworld.ais.galapagos.topics.service;

/**
 * Interface of a TopicService which additionally checks "business" validation rules, e.g. deletion of topics only if no
 * subscribers, creation of topics only on non-staging-only environments etc. <br>
 * Note that the "plain" TopicService still does technical and access validations. <br>
 * The <code>TopicController</code> uses an implementation of this interface, while the "plain" {@link TopicService} is
 * used by the Staging engine. <br>
 * This interface is only a marker interface (for Dependency Injection) and does not add any new methods.
 *
 * @author AlbrechtFlo
 */
public interface ValidatingTopicService extends TopicService {
}
708
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicServiceImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/service/impl/TopicServiceImpl.java
package com.hermesworld.ais.galapagos.topics.service.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.events.GalapagosEventManager;
import com.hermesworld.ais.galapagos.events.GalapagosEventSink;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.kafka.util.InitPerCluster;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.naming.InvalidTopicNameException;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.schemas.*;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.topics.*;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.everit.json.schema.Schema;
import org.everit.json.schema.SchemaException;
import org.everit.json.schema.loader.SchemaLoader;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Service;

import java.time.LocalDate;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

/**
 * "Non-validating" {@link TopicService} implementation (see the {@code @Qualifier}): it performs technical and access
 * checks, while additional business rules live elsewhere (the {@code canDeleteTopic} comment below points to
 * {@code ValidatingTopicServiceImpl}). Topic and schema metadata are persisted in two Kafka-backed repositories
 * ("topics" and "schemas") per cluster, and every mutation is mirrored to a {@link GalapagosEventSink}.
 */
@Service
@Qualifier(value = "nonvalidating")
@Slf4j
public class TopicServiceImpl implements TopicService, InitPerCluster {

    private final KafkaClusters kafkaClusters;

    private final NamingService namingService;

    private final CurrentUserService userService;

    private final ApplicationsService applicationsService;

    private final GalapagosTopicConfig topicSettings;

    private final GalapagosEventManager eventManager;

    // sort order for listTopics(): by topic name
    private static final Comparator<TopicMetadata> topicsComparator = Comparator.comparing(TopicMetadata::getName);

    // sort order for getTopicSchemaVersions(): ascending version number, so the last element is the latest schema
    private static final Comparator<SchemaMetadata> schemaVersionsComparator = Comparator
            .comparingInt(SchemaMetadata::getSchemaVersion);

    // names of the two Kafka-backed metadata repositories used by this service
    static final String METADATA_TOPIC_NAME = "topics";

    static final String SCHEMA_TOPIC_NAME = "schemas";

    public TopicServiceImpl(KafkaClusters kafkaClusters, ApplicationsService applicationsService,
            NamingService namingService, CurrentUserService userService, GalapagosTopicConfig topicSettings,
            GalapagosEventManager eventManager) {
        this.kafkaClusters = kafkaClusters;
        this.applicationsService = applicationsService;
        this.namingService = namingService;
        this.userService = userService;
        this.topicSettings = topicSettings;
        this.eventManager = eventManager;
    }

    /**
     * Warms up both metadata repositories of the given cluster by touching their object caches.
     */
    @Override
    public void init(KafkaCluster cluster) {
        getTopicRepository(cluster).getObjects();
        getSchemaRepository(cluster).getObjects();
    }

    /**
     * Creates a topic on the given environment: validates owner application, registration, current-user authorization
     * and topic name, then creates the physical Kafka topic, stores the metadata, and fires a "topic created" event.
     *
     * @param partitionCount desired partition count; values that are null, &lt; 1, or above the configured maximum are
     *                       silently replaced by the configured default.
     * @param topicConfig    additional Kafka topic config entries, copied into the create params.
     * @return future with the stored metadata, or failed with {@link IllegalArgumentException} /
     *         {@link IllegalStateException} / {@link InvalidTopicNameException} on validation errors.
     */
    @Override
    public CompletableFuture<TopicMetadata> createTopic(String environmentId, TopicMetadata topic,
            Integer partitionCount, Map<String, String> topicConfig) {
        KnownApplication ownerApplication = applicationsService.getKnownApplication(topic.getOwnerApplicationId())
                .orElse(null);
        if (ownerApplication == null) {
            return CompletableFuture.failedFuture(
                    new IllegalArgumentException("Unknown application ID: " + topic.getOwnerApplicationId()));
        }

        KafkaCluster environment = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (environment == null) {
            return FutureUtil.noSuchEnvironment(environmentId);
        }

        ApplicationMetadata metadata = applicationsService
                .getApplicationMetadata(environmentId, topic.getOwnerApplicationId()).orElse(null);
        if (metadata == null) {
            return CompletableFuture.failedFuture(new IllegalArgumentException("Application "
                    + topic.getOwnerApplicationId() + " is not registered on environment " + environmentId));
        }

        if (!applicationsService.isUserAuthorizedFor(metadata.getApplicationId())) {
            return CompletableFuture.failedFuture(new IllegalStateException(
                    "Current user is no owner of application " + metadata.getApplicationId()));
        }

        try {
            namingService.validateTopicName(topic.getName(), topic.getType(), ownerApplication);
        }
        catch (InvalidTopicNameException e) {
            return CompletableFuture.failedFuture(e);
        }

        GalapagosEventSink eventSink = eventManager.newEventSink(environment);

        return environment.getActiveBrokerCount().thenCompose(brokerCount -> {
            // CRITICAL API topics get the (higher) critical replication factor; INTERNAL topics never do
            int replicationFactor = (topic.getType() != TopicType.INTERNAL
                    && topic.getCriticality() == Criticality.CRITICAL) ? topicSettings.getCriticalReplicationFactor()
                            : topicSettings.getStandardReplicationFactor();
            // clamp to the number of live brokers - Kafka cannot replicate beyond that
            if (brokerCount < replicationFactor) {
                replicationFactor = brokerCount;
            }
            Integer pc = partitionCount;
            if (pc == null || pc < 1 || pc > topicSettings.getMaxPartitionCount()) {
                pc = topicSettings.getDefaultPartitionCount();
            }
            TopicCreateParams createParams = new TopicCreateParams(pc, replicationFactor);
            topicConfig.forEach(createParams::setTopicConfig);
            TopicBasedRepository<TopicMetadata> topicRepository = environment.getRepository(METADATA_TOPIC_NAME,
                    TopicMetadata.class);
            // physical topic first, then metadata, then event - each step depends on the previous one
            return environment.createTopic(topic.getName(), createParams).thenCompose(o -> topicRepository.save(topic))
                    .thenCompose(o -> eventSink.handleTopicCreated(topic, createParams)).thenApply(o -> topic);
        });
    }

    /**
     * Adds an application to the producer list of a topic on one environment. Rejected for COMMANDS topics, where
     * producing rights are granted via subscription instead. Note: no duplicate check is performed here.
     */
    @Override
    public CompletableFuture<Void> addTopicProducer(String environmentId, String topicName, String producerId) {
        return doWithClusterAndTopic(environmentId, topicName, (kafkaCluster, metadata, eventSink) -> {
            if (metadata.getType() == TopicType.COMMANDS) {
                return CompletableFuture.failedFuture(
                        new IllegalStateException("For Command Topics, subscribe to the Topic to add a new Producer"));
            }
            List<String> producerList = new ArrayList<>(metadata.getProducers());
            producerList.add(producerId);
            metadata.setProducers(producerList);
            TopicMetadata newTopic = new TopicMetadata(metadata);
            return getTopicRepository(kafkaCluster).save(newTopic)
                    .thenCompose(o -> eventSink.handleAddTopicProducer(newTopic, producerId));
        });
    }

    /**
     * Removes an application from the producer list of a topic on one environment. Rejected for COMMANDS topics (see
     * {@link #addTopicProducer(String, String, String)}).
     */
    @Override
    public CompletableFuture<Void> removeTopicProducer(String envId, String topicName, String producerId) {
        return doWithClusterAndTopic(envId, topicName, (kafkaCluster, metadata, eventSink) -> {
            if (metadata.getType() == TopicType.COMMANDS) {
                return CompletableFuture.failedFuture(
                        new IllegalStateException("For Command Topics, subscribe to the Topic to remove a Producer"));
            }
            List<String> producerList = new ArrayList<>(metadata.getProducers());
            producerList.remove(producerId);
            metadata.setProducers(producerList);
            TopicMetadata newTopic = new TopicMetadata(metadata);
            return getTopicRepository(kafkaCluster).save(newTopic)
                    .thenCompose(o -> eventSink.handleRemoveTopicProducer(newTopic, producerId));
        });
    }

    /**
     * Transfers topic ownership to another application on <b>all</b> stages where the topic exists - the
     * {@code environmentId} parameter is not used by this implementation. The previous owner is demoted to a plain
     * producer; the new owner is removed from the producer list. Not allowed for INTERNAL topics.
     */
    @Override
    public CompletableFuture<Void> changeTopicOwner(String environmentId, String topicName,
            String newApplicationOwnerId) {
        return doOnAllStages(topicName, (kafkaCluster, metadata, eventSink) -> {
            if (metadata.getType() == TopicType.INTERNAL) {
                return CompletableFuture
                        .failedFuture(new IllegalStateException("Cannot change owner for internal topics"));
            }
            String previousOwnerApplicationId = metadata.getOwnerApplicationId();
            List<String> producerList = new ArrayList<>(metadata.getProducers());
            // old owner keeps produce rights; new owner must not appear in its own producer list
            producerList.add(metadata.getOwnerApplicationId());
            metadata.setOwnerApplicationId(newApplicationOwnerId);
            producerList.remove(newApplicationOwnerId);
            metadata.setProducers(producerList);
            TopicMetadata newTopic = new TopicMetadata(metadata);
            return getTopicRepository(kafkaCluster).save(newTopic)
                    .thenCompose(o -> eventSink.handleTopicOwnerChanged(newTopic, previousOwnerApplicationId));
        });
    }

    /**
     * Here: only existence check - real business rules are applied by the validating wrapper (see comment).
     */
    @Override
    public boolean canDeleteTopic(String environmentId, String topicName) {
        // business checks in ValidatingTopicServiceImpl
        return getTopic(environmentId, topicName).isPresent();
    }

    /**
     * Deletes the physical Kafka topic, its metadata, and all of its schema versions, then fires the deletion event.
     */
    @Override
    public CompletableFuture<Void> deleteTopic(String environmentId, String topicName) {
        return doWithClusterAndTopic(environmentId, topicName,
                (kafkaCluster, metadata, eventSink) -> kafkaCluster.deleteTopic(topicName)
                        .thenCompose(o -> getTopicRepository(kafkaCluster).delete(metadata))
                        .thenCompose(o -> deleteTopicSchemas(kafkaCluster, topicName))
                        .thenCompose(o -> eventSink.handleTopicDeleted(metadata)));
    }

    /**
     * Updates the free-text description of a topic on a single environment.
     */
    @Override
    public CompletableFuture<Void> updateTopicDescription(String environmentId, String topicName,
            String newDescription) {
        return doWithClusterAndTopic(environmentId, topicName, (kafkaCluster, metadata, eventSink) -> {
            TopicMetadata newMeta = new TopicMetadata(metadata);
            newMeta.setDescription(newDescription);
            return getTopicRepository(kafkaCluster).save(newMeta)
                    .thenCompose(o -> eventSink.handleTopicDescriptionChanged(newMeta));
        });
    }

    /**
     * Marks a topic as deprecated (with text and end-of-life date) on all stages where it exists.
     */
    @Override
    public CompletableFuture<Void> markTopicDeprecated(String topicName, String deprecationText, LocalDate eolDate) {
        return doOnAllStages(topicName, (kafkaCluster, metadata, eventSink) -> {
            TopicMetadata newMeta = new TopicMetadata(metadata);
            newMeta.setDeprecated(true);
            newMeta.setDeprecationText(deprecationText);
            newMeta.setEolDate(eolDate);
            return getTopicRepository(kafkaCluster).save(newMeta)
                    .thenCompose(o2 -> eventSink.handleTopicDeprecated(newMeta));
        });
    }

    /**
     * Clears the deprecation flag, text and EOL date of a topic on all stages where it exists.
     */
    @Override
    public CompletableFuture<Void> unmarkTopicDeprecated(String topicName) {
        return doOnAllStages(topicName, (kafkaCluster, metadata, eventSink) -> {
            TopicMetadata newMeta = new TopicMetadata(metadata);
            newMeta.setDeprecated(false);
            newMeta.setDeprecationText(null);
            newMeta.setEolDate(null);
            return getTopicRepository(kafkaCluster).save(newMeta)
                    .thenCompose(o2 -> eventSink.handleTopicUndeprecated(newMeta));
        });
    }

    /**
     * Sets or clears the "subscription approval required" flag. No-op future when the flag already has the requested
     * value; rejected for INTERNAL topics.
     */
    @Override
    public CompletableFuture<Void> setSubscriptionApprovalRequiredFlag(String environmentId, String topicName,
            boolean subscriptionApprovalRequired) {
        return doWithClusterAndTopic(environmentId, topicName, (kafkaCluster, metadata, eventSink) -> {
            if (metadata.isSubscriptionApprovalRequired() == subscriptionApprovalRequired) {
                return FutureUtil.noop();
            }
            if (metadata.getType() == TopicType.INTERNAL) {
                return CompletableFuture.failedFuture(new IllegalStateException(
                        "Cannot update subscriptionApprovalRequired flag for application internal topics"));
            }
            TopicMetadata newMeta = new TopicMetadata(metadata);
            newMeta.setSubscriptionApprovalRequired(subscriptionApprovalRequired);
            return getTopicRepository(kafkaCluster).save(newMeta)
                    .thenCompose(o -> eventSink.handleTopicSubscriptionApprovalRequiredFlagChanged(newMeta));
        });
    }

    /**
     * @return metadata of the named topic, or empty if environment or topic are unknown.
     */
    @Override
    public Optional<TopicMetadata> getTopic(String environmentId, String topicName) {
        KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return Optional.empty();
        }
        return getTopicRepository(kafkaCluster).getObject(topicName);
    }

    /**
     * @return all topics of the environment, sorted by name; empty list for unknown environments.
     */
    @Override
    public List<TopicMetadata> listTopics(String environmentId) {
        KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return Collections.emptyList();
        }
        return getTopicRepository(kafkaCluster).getObjects().stream().sorted(topicsComparator)
                .collect(Collectors.toList());
    }

    /**
     * @return all schema versions of the topic, sorted ascending by version number (latest last); empty list for
     *         unknown environments.
     */
    @Override
    public List<SchemaMetadata> getTopicSchemaVersions(String environmentId, String topicName) {
        KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return Collections.emptyList();
        }
        return getSchemaRepository(kafkaCluster).getObjects().stream()
                .filter(meta -> topicName.equals(meta.getTopicName())).sorted(schemaVersionsComparator)
                .collect(Collectors.toList());
    }

    @Override
    public Optional<SchemaMetadata> getSchemaById(String environmentId, String schemaId) {
        return kafkaClusters.getEnvironment(environmentId)
                .flatMap(cluster -> getSchemaRepository(cluster).getObject(schemaId));
    }

    /**
     * Convenience overload: builds a fresh {@link SchemaMetadata} (new UUID, current user as creator, next free version
     * number) and delegates to {@link #addTopicSchemaVersion(String, SchemaMetadata, SchemaCompatCheckMode)}.
     */
    @Override
    public CompletableFuture<SchemaMetadata> addTopicSchemaVersion(String environmentId, String topicName,
            String jsonSchema, String changeDescription, SchemaCompatCheckMode skipCompatCheck) {
        String userName = userService.getCurrentUserName().orElse(null);
        if (userName == null) {
            return CompletableFuture.failedFuture(new IllegalStateException("No user currently logged in"));
        }

        int nextVersionNo = 1;
        List<SchemaMetadata> existingVersions = getTopicSchemaVersions(environmentId, topicName);
        if (!existingVersions.isEmpty()) {
            SchemaMetadata previousVersion = existingVersions.get(existingVersions.size() - 1);
            nextVersionNo = previousVersion.getSchemaVersion() + 1;
        }

        SchemaMetadata newSchemaVersion = new SchemaMetadata();
        // if it will replace an existing version, the private method will update the ID
        newSchemaVersion.setId(UUID.randomUUID().toString());
        newSchemaVersion.setTopicName(topicName);
        newSchemaVersion.setCreatedBy(userName);
        newSchemaVersion.setCreatedAt(ZonedDateTime.now());
        newSchemaVersion.setJsonSchema(jsonSchema);
        newSchemaVersion.setChangeDescription(changeDescription);
        newSchemaVersion.setSchemaVersion(nextVersionNo);

        return addTopicSchemaVersion(environmentId, newSchemaVersion, skipCompatCheck);
    }

    /**
     * Deletes the latest schema version of a topic on the given stage. Fails if the same version still exists on the
     * next stage (must be deleted there first) - this keeps the stages consistent for later staging.
     */
    @Override
    public CompletableFuture<Void> deleteLatestTopicSchemaVersion(String environmentId, String topicName) {
        List<SchemaMetadata> existingVersions = getTopicSchemaVersions(environmentId, topicName);
        // nextEnvId is null on the last stage; getTopicSchemaVersions(null, ...) then presumably yields an empty
        // list (assumes kafkaClusters.getEnvironment(null) returns empty rather than throwing - TODO confirm)
        String nextEnvId = nextStageId(environmentId).orElse(null);
        if (existingVersions.isEmpty()) {
            return CompletableFuture
                    .failedFuture(new IllegalStateException("No Schemas on current stage for topic " + topicName));
        }
        SchemaMetadata latestSchemaOnCurrentStage = existingVersions.get(existingVersions.size() - 1);
        SchemaMetadata schemaOnNextStage = getTopicSchemaVersions(nextEnvId, topicName).stream()
                .filter(v -> latestSchemaOnCurrentStage.getSchemaVersion() == v.getSchemaVersion()).findFirst()
                .orElse(null);

        KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return FutureUtil.noSuchEnvironment(environmentId);
        }

        if (schemaOnNextStage != null) {
            return CompletableFuture.failedFuture(new IllegalStateException("""
                    The selected schema already exists on the next stage! To delete \
                    this schema you have to delete it there first!\
                    """));
        }

        GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);
        TopicMetadata metadata = getTopicRepository(kafkaCluster).getObject(topicName).orElse(null);
        if (metadata == null) {
            return noSuchTopic(environmentId, topicName);
        }

        return getSchemaRepository(kafkaCluster).delete(latestSchemaOnCurrentStage)
                .thenCompose(o -> eventSink.handleTopicSchemaDeleted(metadata));
    }

    /**
     * Stores a new JSON schema version for a topic after extensive validation: parseable draft-7 schema, declared
     * {@code $schema}, no top-level "data" property for EVENTS / COMMANDS topics (payload only, no CloudEvents
     * envelope), consecutive version numbering, compatibility with the previous version (unless skipped), and a change
     * description for every version after #1.
     *
     * @param skipCompatCheck only {@code CHECK_SCHEMA} triggers the compatibility validation against the previous
     *                        version.
     */
    @Override
    public CompletableFuture<SchemaMetadata> addTopicSchemaVersion(String environmentId, SchemaMetadata schemaMetadata,
            SchemaCompatCheckMode skipCompatCheck) {
        // userName is only used as a "logged in" guard here; createdBy is taken from the passed metadata
        String userName = userService.getCurrentUserName().orElse(null);
        if (userName == null) {
            return CompletableFuture.failedFuture(new IllegalStateException("No user currently logged in"));
        }

        KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return FutureUtil.noSuchEnvironment(environmentId);
        }

        String topicName = schemaMetadata.getTopicName();
        TopicMetadata metadata = getTopicRepository(kafkaCluster).getObject(topicName).orElse(null);
        if (metadata == null) {
            return noSuchTopic(environmentId, topicName);
        }

        if (metadata.getType() == TopicType.INTERNAL) {
            return CompletableFuture
                    .failedFuture(new IllegalStateException("Cannot add JSON schemas to internal topics"));
        }

        List<SchemaMetadata> existingVersions = getTopicSchemaVersions(environmentId, topicName);

        Schema newSchema;
        try {
            newSchema = compileSchema(schemaMetadata.getJsonSchema());
        }
        catch (JSONException | SchemaException e) {
            return CompletableFuture.failedFuture(new IllegalArgumentException("Could not parse JSON schema", e));
        }

        JSONObject json = new JSONObject(schemaMetadata.getJsonSchema());
        if (!json.has("$schema")) {
            return CompletableFuture.failedFuture(
                    new IllegalArgumentException("The JSON Schema must declare a \"$schema\" value on first level."));
        }

        if (newSchema.definesProperty("data")
                && (metadata.getType() == TopicType.EVENTS || metadata.getType() == TopicType.COMMANDS)) {
            return CompletableFuture.failedFuture(new IllegalArgumentException(
                    """
                    The JSON Schema must not declare a "data" object on first level.\
                    The JSON Schema must not contain the CloudEvents fields, but only the contents of the "data" field.\
                    """));
        }

        // version numbers must be consecutive: #1 for the first schema, previous + 1 afterwards
        if (existingVersions.isEmpty() && schemaMetadata.getSchemaVersion() != 1) {
            return CompletableFuture.failedFuture(new IllegalArgumentException("Illegal next schema version number #"
                    + schemaMetadata.getSchemaVersion() + " for topic " + topicName));
        }
        if (!existingVersions.isEmpty() && existingVersions.get(existingVersions.size() - 1)
                .getSchemaVersion() != schemaMetadata.getSchemaVersion() - 1) {
            return CompletableFuture.failedFuture(new IllegalArgumentException("Illegal next schema version number #"
                    + schemaMetadata.getSchemaVersion() + " for topic " + topicName));
        }

        SchemaMetadata previousVersion = existingVersions.isEmpty() ? null
                : existingVersions.get(existingVersions.size() - 1);

        if (previousVersion != null && skipCompatCheck == SchemaCompatCheckMode.CHECK_SCHEMA) {
            try {
                Schema previousSchema = compileSchema(previousVersion.getJsonSchema());

                // additional test: if both are equal, do not accept (save a tree!)
                if (SchemaUtil.areEqual(newSchema, previousSchema)) {
                    return CompletableFuture.failedFuture(new IllegalArgumentException(
                            "The new schema is identical to the latest schema of this topic."));
                }

                // note the flipped argument order: COMMANDS topics check producer compatibility (new vs. previous),
                // all others check consumer compatibility (previous vs. new)
                SchemaCompatibilityValidator validator;
                if (metadata.getType() == TopicType.COMMANDS) {
                    validator = new SchemaCompatibilityValidator(newSchema, previousSchema,
                            new ProducerCompatibilityErrorHandler(
                                    topicSettings.getSchemas().isAllowAddedPropertiesOnCommandTopics()));
                }
                else {
                    validator = new SchemaCompatibilityValidator(previousSchema, newSchema,
                            new ConsumerCompatibilityErrorHandler(
                                    topicSettings.getSchemas().isAllowRemovedOptionalProperties()));
                }
                validator.validate();
            }
            catch (JSONException e) {
                // how, on earth, did it get into the repo then???
                log.error("Invalid JSON schema in repository found for topic " + topicName + " on environment "
                        + environmentId + " with schema version " + previousVersion.getSchemaVersion());
                // danger zone here: allow full replacement of invalid schema (fallthrough)
            }
            catch (IncompatibleSchemaException e) {
                return CompletableFuture.failedFuture(e);
            }
        }

        if (existingVersions.isEmpty() && schemaMetadata.getChangeDescription() != null) {
            return CompletableFuture.failedFuture(
                    new IllegalArgumentException("Cant have a change description for schema with version number #1."));
        }

        if (!existingVersions.isEmpty() && schemaMetadata.getChangeDescription() == null) {
            return CompletableFuture.failedFuture(new IllegalArgumentException(
                    "Change Description has to be set for schemas with version greater 1."));
        }

        // copy to be safe here
        SchemaMetadata newSchemaVersion = new SchemaMetadata(schemaMetadata);
        GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);

        return getSchemaRepository(kafkaCluster).save(newSchemaVersion)
                .thenCompose(o -> eventSink.handleTopicSchemaAdded(metadata, newSchemaVersion))
                .thenApply(v -> newSchemaVersion);
    }

    @Override
    public CompletableFuture<TopicCreateParams> buildTopicCreateParams(String environmentId, String topicName) {
        return kafkaClusters.getEnvironment(environmentId).map(cluster -> cluster.buildTopicCreateParams(topicName))
                .orElse(FutureUtil.noSuchEnvironment(environmentId));
    }

    /**
     * Reads up to {@code limit} records from the topic. Not available for INTERNAL topics.
     */
    @Override
    public CompletableFuture<List<ConsumerRecord<String, String>>> peekTopicData(String environmentId,
            String topicName, int limit) {
        // only allow peek of API topics
        TopicMetadata metadata = getTopic(environmentId, topicName).orElse(null);
        if (metadata == null) {
            return noSuchTopic(environmentId, topicName);
        }
        if (metadata.getType() == TopicType.INTERNAL) {
            return CompletableFuture.failedFuture(
                    new IllegalStateException("Data of internal topics cannot be retrieved via Galapagos."));
        }

        return kafkaClusters.getEnvironment(environmentId).map(cluster -> cluster.peekTopicData(topicName, limit))
                .orElse(FutureUtil.noSuchEnvironment(environmentId));
    }

    /**
     * @return the ID of the environment following the given one in the configured order (relies on
     *         {@code getEnvironmentIds()} being stage-ordered), or empty for the last / an unknown environment.
     */
    private Optional<String> nextStageId(String environmentId) {
        List<String> environmentIds = kafkaClusters.getEnvironmentIds();
        for (int i = 0; i < environmentIds.size() - 1; i++) {
            if (environmentId.equals(environmentIds.get(i))) {
                return Optional.of(environmentIds.get(i + 1));
            }
        }
        return Optional.empty();
    }

    private TopicBasedRepository<TopicMetadata> getTopicRepository(KafkaCluster kafkaCluster) {
        return kafkaCluster.getRepository(METADATA_TOPIC_NAME, TopicMetadata.class);
    }

    private TopicBasedRepository<SchemaMetadata> getSchemaRepository(KafkaCluster kafkaCluster) {
        return kafkaCluster.getRepository(SCHEMA_TOPIC_NAME, SchemaMetadata.class);
    }

    /**
     * Common preamble for single-environment operations: resolves cluster, topic metadata and an event sink, failing
     * fast on unknown environment / topic, then delegates to the given action.
     */
    private CompletableFuture<Void> doWithClusterAndTopic(String environmentId, String topicName,
            TopicServiceAction action) {
        KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return FutureUtil.noSuchEnvironment(environmentId);
        }

        TopicMetadata metadata = getTopic(environmentId, topicName).orElse(null);
        if (metadata == null) {
            return noSuchTopic(environmentId, topicName);
        }

        GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);
        return action.apply(kafkaCluster, metadata, eventSink);
    }

    /**
     * Runs the given action sequentially on every environment where the topic exists; fails if the topic exists
     * nowhere. A failure on one stage short-circuits the remaining stages (see inline comment below).
     */
    private CompletableFuture<Void> doOnAllStages(String topicName, TopicServiceAction action) {
        List<String> environmentIds = kafkaClusters.getEnvironmentIds();

        // only operate on environments where this topic exists
        environmentIds = environmentIds.stream()
                .filter(env -> kafkaClusters.getEnvironment(env)
                        .map(cluster -> getTopicRepository(cluster).containsObject(topicName)).orElse(false))
                .collect(Collectors.toList());

        if (environmentIds.isEmpty()) {
            return CompletableFuture
                    .failedFuture(new NoSuchElementException("Topic " + topicName + " not found on any environment"));
        }

        CompletableFuture<Void> result = CompletableFuture.completedFuture(null);

        // Build Event Sinks here to avoid missing Thread-Local issues
        Map<String, GalapagosEventSink> eventSinks = new HashMap<>();
        environmentIds.stream().map(kafkaClusters::getEnvironment).filter(Optional::isPresent)
                .forEach(op -> eventSinks.put(op.get().getId(), eventManager.newEventSink(op.get())));

        // perform action on one environment after another. This could theoretically be optimized to parallel
        // execution, but this way, one failure on one stage at least avoids deprecation on the next stages.
        for (String envId : environmentIds) {
            // replace the sink provided by doWithClusterAndTopic with the one pre-built above
            TopicServiceAction wrappedAction = (cluster, metadata, sink) -> action.apply(cluster, metadata,
                    eventSinks.get(envId));
            result = result.thenCompose(o -> doWithClusterAndTopic(envId, topicName, wrappedAction));
        }

        return result;
    }

    /**
     * Deletes all schema versions of the given topic from the cluster's schema repository, one after another.
     */
    private CompletableFuture<Void> deleteTopicSchemas(KafkaCluster cluster, String topicName) {
        CompletableFuture<Void> result = FutureUtil.noop();
        TopicBasedRepository<SchemaMetadata> schemaRepository = getSchemaRepository(cluster);
        for (SchemaMetadata schema : schemaRepository.getObjects()) {
            if (topicName.equals(schema.getTopicName())) {
                result = result.thenCompose(o -> schemaRepository.delete(schema));
            }
        }
        return result;
    }

    /**
     * Parses and compiles a JSON schema with draft-7 support; throws {@link JSONException} / {@link SchemaException}
     * on invalid input.
     */
    private static Schema compileSchema(String source) {
        JSONObject obj = new JSONObject(source);
        return SchemaLoader.builder().draftV7Support().schemaJson(obj).build().load().build();
    }

    private static <T> CompletableFuture<T> noSuchTopic(String environmentId, String topicName) {
        return CompletableFuture.failedFuture(new NoSuchElementException(
                "No topic with name " + topicName + " found on environment " + environmentId + "."));
    }

    /**
     * Internal callback shape used by {@link #doWithClusterAndTopic} / {@link #doOnAllStages}.
     */
    private interface TopicServiceAction {

        CompletableFuture<Void> apply(KafkaCluster cluster, TopicMetadata topic, GalapagosEventSink eventSink);
    }
}
28,837
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ValidatingTopicServiceImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/service/impl/ValidatingTopicServiceImpl.java
package com.hermesworld.ais.galapagos.topics.service.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.*;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.topics.service.ValidatingTopicService;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Primary;
import org.springframework.stereotype.Service;

import java.time.LocalDate;
import java.time.Period;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Wraps the real Topic Service to perform validations which should <b>not</b> be performed during Staging (e.g., if the
 * current stage allows direct Topic creation, which would be a bad check during staging). For "normal" service clients,
 * this should be the default Topic Service to use.
 */
@Service
@Primary
public class ValidatingTopicServiceImpl implements ValidatingTopicService {

    // the wrapped, non-validating service which performs the actual work
    private final TopicService topicService;

    private final SubscriptionService subscriptionService;

    private final KafkaClusters kafkaClusters;

    private final ApplicationsService applicationsService;

    private final GalapagosTopicConfig topicConfig;

    // feature toggle: when true, schemas of subscribed topics may still be deleted (on non-staging-only environments)
    private final boolean schemaDeleteWithSub;

    public ValidatingTopicServiceImpl(@Qualifier(value = "nonvalidating") TopicService topicService,
            SubscriptionService subscriptionService, ApplicationsService applicationsService,
            KafkaClusters kafkaClusters, GalapagosTopicConfig topicConfig,
            @Value("${info.toggles.schemaDeleteWithSub:false}") boolean schemaDeleteWithSub) {
        this.topicService = topicService;
        this.subscriptionService = subscriptionService;
        this.applicationsService = applicationsService;
        this.kafkaClusters = kafkaClusters;
        this.topicConfig = topicConfig;
        this.schemaDeleteWithSub = schemaDeleteWithSub;
    }

    /**
     * Creates a topic after validating that non-internal topics carry the expected messages-per-day and message-size
     * information, and that the target environment is not staging-only.
     */
    @Override
    public CompletableFuture<TopicMetadata> createTopic(String environmentId, TopicMetadata topic,
            Integer partitionCount, Map<String, String> topicConfig) {
        if ((topic.getMessagesPerDay() == null || topic.getMessagesSize() == null)
                && topic.getType() != TopicType.INTERNAL) {
            return CompletableFuture.failedFuture(new IllegalStateException(
                    "Please select the number of messages per day and how big your messages are!"));
        }
        return checkOnNonStaging(environmentId, "create topics", TopicMetadata.class)
                .orElseGet(() -> topicService.createTopic(environmentId, topic, partitionCount, topicConfig));
    }

    /**
     * Determines if the given topic may currently be deleted on the given environment. Internal topics are deletable on
     * any non-staging-only environment. API topics are deletable when they have no (approved) subscriptions (or their
     * EOL date has passed) and they have not been staged to the next environment.
     */
    @Override
    public boolean canDeleteTopic(String environmentId, String topicName) {
        TopicMetadata topic = getTopic(environmentId, topicName).orElse(null);
        if (topic == null) {
            return false;
        }
        if (topic.getType() == TopicType.INTERNAL) {
            return kafkaClusters.getEnvironmentMetadata(environmentId).map(env -> !env.isStagingOnly()).orElse(false);
        }
        LocalDate eolDate = topic.getEolDate();
        boolean isEolDatePast = eolDate != null && eolDate.isBefore(LocalDate.now());
        // topics with active subscriptions are protected, unless their end-of-life date is already past
        if (!subscriptionService.getSubscriptionsForTopic(environmentId, topicName, false).isEmpty()
                && !isEolDatePast) {
            return false;
        }
        String nextEnvId = nextStageId(environmentId).orElse(null);
        if (nextEnvId == null) {
            return true;
        }
        // deletable only if the topic has not (yet) been staged to the next environment
        return topicService.getTopic(nextEnvId, topicName).isEmpty();
    }

    /**
     * Deletes the latest schema version of a topic. If the topic has subscriptions, this is only allowed when the
     * schemaDeleteWithSub toggle is enabled, and only on non-staging-only environments.
     */
    @Override
    public CompletableFuture<Void> deleteLatestTopicSchemaVersion(String environmentId, String topicName) {
        TopicMetadata topic = getTopic(environmentId, topicName).orElse(null);
        if (topic == null) {
            return CompletableFuture.failedFuture(new NoSuchElementException(
                    "No topic with name " + topicName + " found on environment " + environmentId + "."));
        }
        if (!subscriptionService.getSubscriptionsForTopic(environmentId, topicName, false).isEmpty()) {
            if (!this.schemaDeleteWithSub) {
                return CompletableFuture
                        .failedFuture(new IllegalStateException("Schemas of subscribed Topics cannot be deleted!"));
            }
            return checkOnNonStaging(environmentId, "Delete latest schema")
                    .orElseGet(() -> topicService.deleteLatestTopicSchemaVersion(environmentId, topicName));
        }
        else {
            return topicService.deleteLatestTopicSchemaVersion(environmentId, topicName);
        }
    }

    /**
     * Deletes the given topic, failing with a {@link TopicInUseException} if {@link #canDeleteTopic} denies deletion.
     */
    @Override
    public CompletableFuture<Void> deleteTopic(String environmentId, String topicName) {
        if (!canDeleteTopic(environmentId, topicName)) {
            return CompletableFuture.failedFuture(new TopicInUseException(
                    "The topic is currently in use by at least one application (other than owner application) and / or has been staged and thus cannot be deleted."));
        }
        return topicService.deleteTopic(environmentId, topicName);
    }

    /** Updates the topic description; only allowed on non-staging-only environments. */
    @Override
    public CompletableFuture<Void> updateTopicDescription(String environmentId, String topicName,
            String newDescription) {
        return checkOnNonStaging(environmentId, "update topic descriptions")
                .orElseGet(() -> topicService.updateTopicDescription(environmentId, topicName, newDescription));
    }

    /**
     * Marks a topic deprecated, validating that the EOL date lies at least the configured minimum deprecation time in
     * the future. NOTE(review): assumes eolDate is non-null — a null value would raise a NullPointerException here;
     * presumably validated by the calling controller — confirm.
     */
    @Override
    public CompletableFuture<Void> markTopicDeprecated(String topicName, String deprecationText, LocalDate eolDate) {
        if (eolDate.isBefore(LocalDate.now().plus(topicConfig.getMinDeprecationTime()))) {
            return CompletableFuture
                    .failedFuture(new IllegalArgumentException("EOL date for deprecated topic must be at least "
                            + toDisplayString(topicConfig.getMinDeprecationTime()) + " in the future"));
        }
        return topicService.markTopicDeprecated(topicName, deprecationText, eolDate);
    }

    /** Removes the deprecation mark from a topic. No additional validation required. */
    @Override
    public CompletableFuture<Void> unmarkTopicDeprecated(String topicName) {
        return topicService.unmarkTopicDeprecated(topicName);
    }

    /** Updates the subscriptionApprovalRequired flag; only allowed on non-staging-only environments. */
    @Override
    public CompletableFuture<Void> setSubscriptionApprovalRequiredFlag(String environmentId, String topicName,
            boolean subscriptionApprovalRequired) {
        return checkOnNonStaging(environmentId, "update subscriptionApprovalRequired flag").orElseGet(() -> topicService
                .setSubscriptionApprovalRequiredFlag(environmentId, topicName, subscriptionApprovalRequired));
    }

    /** Adds a new JSON schema version from source text; only allowed on non-staging-only environments. */
    @Override
    public CompletableFuture<SchemaMetadata> addTopicSchemaVersion(String environmentId, String topicName,
            String jsonSchema, String changeDescription, SchemaCompatCheckMode skipCompatCheck) {
        return checkOnNonStaging(environmentId, "add JSON schemas", SchemaMetadata.class).orElseGet(() -> topicService
                .addTopicSchemaVersion(environmentId, topicName, jsonSchema, changeDescription, skipCompatCheck));
    }

    /**
     * Adds an existing schema metadata object (e.g. during staging). Intentionally performs NO staging-only check, as
     * this overload is used by the staging process itself.
     */
    @Override
    public CompletableFuture<SchemaMetadata> addTopicSchemaVersion(String environmentId, SchemaMetadata metadata,
            SchemaCompatCheckMode skipCompatCheck) {
        return topicService.addTopicSchemaVersion(environmentId, metadata, skipCompatCheck);
    }

    /** Convenience overload of the generic staging-only check for Void-returning operations. */
    private Optional<CompletableFuture<Void>> checkOnNonStaging(String environmentId, String action) {
        return checkOnNonStaging(environmentId, action, Void.class);
    }

    /**
     * Returns a failed future (wrapped in an Optional) if the given environment is staging-only, meaning the given
     * action may not be performed directly on it. Returns an empty Optional if the action is allowed.
     *
     * @param environmentId ID of the Kafka environment to check.
     * @param action        human-readable description of the action, used in the error message.
     * @param resultClass   result type of the operation (only used for generics, see unused-parameter note).
     */
    private <T> Optional<CompletableFuture<T>> checkOnNonStaging(String environmentId, String action,
            Class<T> resultClass) {
        if (kafkaClusters.getEnvironmentMetadata(environmentId).map(KafkaEnvironmentConfig::isStagingOnly)
                .orElse(false)) {
            return Optional.of(CompletableFuture.failedFuture(new IllegalStateException("You may only " + action
                    + " on non-staging-only environments. Use Staging to apply such a change on this environment.")));
        }
        return Optional.empty();
    }

    /**
     * Returns the ID of the environment following the given one in the configured environment order, or an empty
     * Optional if the given environment is the last one (or unknown).
     */
    private Optional<String> nextStageId(String environmentId) {
        List<String> environmentIds = kafkaClusters.getEnvironmentIds();
        for (int i = 0; i < environmentIds.size() - 1; i++) {
            if (environmentId.equals(environmentIds.get(i))) {
                return Optional.of(environmentIds.get(i + 1));
            }
        }
        return Optional.empty();
    }

    /** Pure delegation; no validation required for read access. */
    @Override
    public List<TopicMetadata> listTopics(String environmentId) {
        return topicService.listTopics(environmentId);
    }

    /** Pure delegation; no validation required for read access. */
    @Override
    public Optional<TopicMetadata> getTopic(String environmentId, String topicName) {
        return topicService.getTopic(environmentId, topicName);
    }

    /** Pure delegation; no validation required for read access. */
    @Override
    public List<SchemaMetadata> getTopicSchemaVersions(String environmentId, String topicName) {
        return topicService.getTopicSchemaVersions(environmentId, topicName);
    }

    /** Pure delegation; no validation required for read access. */
    @Override
    public Optional<SchemaMetadata> getSchemaById(String environmentId, String schemaId) {
        return topicService.getSchemaById(environmentId, schemaId);
    }

    /** Pure delegation; no validation required. */
    @Override
    public CompletableFuture<TopicCreateParams> buildTopicCreateParams(String environmentId, String topicName) {
        return topicService.buildTopicCreateParams(environmentId, topicName);
    }

    /**
     * Reads (peeks) data from the topic. For topics requiring subscription approval, the current user must own the
     * topic's owner application or an application with an approved subscription.
     */
    @Override
    public CompletableFuture<List<ConsumerRecord<String, String>>> peekTopicData(String environmentId,
            String topicName, int limit) {
        TopicMetadata metadata = getTopic(environmentId, topicName).orElse(null);

        // if metadata is null, topicService implementation will deal with it.
        if (metadata != null && metadata.isSubscriptionApprovalRequired()
                && !currentUserMayRead(environmentId, metadata)) {
            return CompletableFuture.failedFuture(new IllegalStateException(
                    "You are not permitted to read from this topic. Subscribe one of your applications to this topic first."));
        }

        return topicService.peekTopicData(environmentId, topicName, limit);
    }

    /** Adds a producer application to the topic; only allowed on non-staging-only environments. */
    @Override
    public CompletableFuture<Void> addTopicProducer(String environmentId, String topicName, String producerId) {
        return checkOnNonStaging(environmentId, "add producer", Void.class)
                .orElseGet(() -> topicService.addTopicProducer(environmentId, topicName, producerId));
    }

    /** Removes a producer application from the topic; only allowed on non-staging-only environments. */
    @Override
    public CompletableFuture<Void> removeTopicProducer(String envId, String topicName, String appId) {
        return checkOnNonStaging(envId, "delete producer", Void.class)
                .orElseGet(() -> topicService.removeTopicProducer(envId, topicName, appId));
    }

    /** Changes the owner application of the topic; only allowed on non-staging-only environments. */
    @Override
    public CompletableFuture<Void> changeTopicOwner(String environmentId, String topicName,
            String newApplicationOwnerId) {
        return checkOnNonStaging(environmentId, "change Topic owner", Void.class)
                .orElseGet(() -> topicService.changeTopicOwner(environmentId, topicName, newApplicationOwnerId));
    }

    /**
     * Checks if the current user owns (via owner requests) the topic's owner application or any application with an
     * approved subscription to the topic.
     */
    private boolean currentUserMayRead(String environmentId, TopicMetadata metadata) {
        Set<String> subscribedApplications = subscriptionService
                .getSubscriptionsForTopic(environmentId, metadata.getName(), false).stream()
                .map(SubscriptionMetadata::getClientApplicationId).collect(Collectors.toSet());
        // the owner application is always allowed to read
        subscribedApplications.add(metadata.getOwnerApplicationId());
        return applicationsService.getUserApplicationOwnerRequests().stream()
                .anyMatch(r -> subscribedApplications.contains(r.getApplicationId()));
    }

    /**
     * Renders a {@link Period} as a human-readable English string, e.g. "1 year, 2 months, 3 days". Zero components are
     * omitted.
     */
    private static String toDisplayString(Period period) {
        // TODO rework for i18n
        StringBuilder sb = new StringBuilder();
        Function<Integer, String> plural = i -> i > 1 ? "s" : "";
        // prepend a comma separator only once the builder already has content
        Function<StringBuilder, String> comma = s -> s.length() > 0 ? ", " : "";
        if (period.getYears() > 0) {
            sb.append(comma.apply(sb));
            sb.append(period.getYears()).append(" year").append(plural.apply(period.getYears()));
        }
        if (period.getMonths() > 0) {
            sb.append(comma.apply(sb));
            sb.append(period.getMonths()).append(" month").append(plural.apply(period.getMonths()));
        }
        if (period.getDays() > 0) {
            sb.append(comma.apply(sb));
            sb.append(period.getDays()).append(" day").append(plural.apply(period.getDays()));
        }
        return sb.toString();
    }

}
13,268
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionState.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/SubscriptionState.java
package com.hermesworld.ais.galapagos.subscriptions; public enum SubscriptionState { APPROVED, PENDING, REJECTED, CANCELED; }
133
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionMetadata.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/SubscriptionMetadata.java
package com.hermesworld.ais.galapagos.subscriptions; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.hermesworld.ais.galapagos.util.HasKey; import lombok.Getter; import lombok.Setter; /** * Descriptor of a "Subscription". A subscription is the logical "right" for an application to read from a Kafka Topic * (or, in case of <code>COMMAND</code> topics, write to that topic). Each subscription instance exists on only one * Kafka cluster, so an application may be subscribed to a topic on one cluster, but not on another. <br> * Subscriptions have a <i>state</i>, which usually is <code>APPROVED</code>. In case of topics for which the * <code>requiresSubscriptionApproval</code> flag is set, this state is initially <code>PENDING</code>, and, after * handling by the owner of the subscribed topic, is updated to <code>APPROVED</code> or <code>REJECTED</code>, * respectively. <br> * Additionally, users can specify a description / a reasoning for a subscription. This can be used for security audits * and / or synchronizations in external architecture tools. <br> * Subscriptions are a completely virtual construct only existing in Galapagos. They map best to the ACLs in Kafka which * are created so the subscribing applications can read from the desired topics (but note that these ACLs are not * created by the Subscriptions Service, but by the class <code>UpdateApplicationAclsListener</code>). * * @author AlbrechtFlo */ @JsonSerialize @JsonIgnoreProperties(ignoreUnknown = true) public class SubscriptionMetadata implements HasKey { @Getter @Setter private String id; @Getter @Setter private String clientApplicationId; @Getter @Setter private String topicName; /** * For backwards compatibility with old metadata, this defaults to APPROVED. 
*/ @Setter private SubscriptionState state = SubscriptionState.APPROVED; @Getter @Setter private String description; public SubscriptionMetadata() { } public SubscriptionMetadata(SubscriptionMetadata original) { id = original.id; clientApplicationId = original.clientApplicationId; topicName = original.topicName; state = original.state; description = original.description; } @Override public String key() { return id; } public SubscriptionState getState() { return state == null ? SubscriptionState.APPROVED : state; } }
2,549
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CreateSubscriptionDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/controller/CreateSubscriptionDto.java
package com.hermesworld.ais.galapagos.subscriptions.controller; import lombok.Getter; import lombok.Setter; @Getter @Setter public class CreateSubscriptionDto { private String topicName; private String description; public CreateSubscriptionDto() { } }
274
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/controller/SubscriptionDto.java
package com.hermesworld.ais.galapagos.subscriptions.controller; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState; import lombok.Getter; @Getter public class SubscriptionDto { private final String id; private final String topicName; private final String environmentId; private final String clientApplicationId; private final SubscriptionState state; private final String description; public SubscriptionDto(String id, String topicName, String environmentId, String clientApplicationId, SubscriptionState state, String description) { this.id = id; this.topicName = topicName; this.environmentId = environmentId; this.clientApplicationId = clientApplicationId; this.state = state; this.description = description; } }
836
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionsController.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/controller/SubscriptionsController.java
package com.hermesworld.ais.galapagos.subscriptions.controller; import java.util.List; import java.util.NoSuchElementException; import java.util.concurrent.ExecutionException; import java.util.function.Supplier; import java.util.stream.Collectors; import com.hermesworld.ais.galapagos.applications.ApplicationsService; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata; import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService; import com.hermesworld.ais.galapagos.topics.TopicMetadata; import com.hermesworld.ais.galapagos.topics.service.TopicService; import lombok.extern.slf4j.Slf4j; import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.*; import org.springframework.web.server.ResponseStatusException; @RestController @Slf4j public class SubscriptionsController { private final SubscriptionService subscriptionService; private final ApplicationsService applicationsService; private final TopicService topicService; private final KafkaClusters kafkaEnvironments; private final Supplier<ResponseStatusException> notFound = () -> new ResponseStatusException(HttpStatus.NOT_FOUND); public SubscriptionsController(SubscriptionService subscriptionService, ApplicationsService applicationsService, TopicService topicService, KafkaClusters kafkaEnvironments) { this.subscriptionService = subscriptionService; this.applicationsService = applicationsService; this.topicService = topicService; this.kafkaEnvironments = kafkaEnvironments; } @GetMapping(value = "/api/applications/{applicationId}/subscriptions/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE) public List<SubscriptionDto> getApplicationSubscriptions(@PathVariable String applicationId, @PathVariable String environmentId, @RequestParam(defaultValue = "false") boolean 
includeNonApproved) { applicationsService.getKnownApplication(applicationId).orElseThrow(notFound); kafkaEnvironments.getEnvironmentMetadata(environmentId).orElseThrow(notFound); return subscriptionService.getSubscriptionsOfApplication(environmentId, applicationId, includeNonApproved) .stream().map(sub -> toDto(environmentId, sub)).collect(Collectors.toList()); } @PutMapping(value = "/api/applications/{applicationId}/subscriptions/{environmentId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) public SubscriptionDto createApplicationSubscription(@PathVariable String applicationId, @PathVariable String environmentId, @RequestBody CreateSubscriptionDto createData) { KafkaEnvironmentConfig environment = kafkaEnvironments.getEnvironmentMetadata(environmentId) .orElseThrow(notFound); if (!applicationsService.isUserAuthorizedFor(applicationId) || environment.isStagingOnly()) { throw new ResponseStatusException(HttpStatus.FORBIDDEN); } try { SubscriptionMetadata subscription = subscriptionService.addSubscription(environmentId, createData.getTopicName(), applicationId, createData.getDescription()).get(); return toDto(environmentId, subscription); } catch (ExecutionException e) { throw handleExecutionException(e); } catch (InterruptedException e) { return null; } } @PostMapping(value = "/api/topics/{environmentId}/{topicName}/subscriptions/{subscriptionId}", consumes = MediaType.APPLICATION_JSON_VALUE) public void updateApplicationSubscription(@PathVariable String environmentId, @PathVariable String topicName, @PathVariable String subscriptionId, @RequestBody UpdateSubscriptionDto updateData) throws InterruptedException { if (updateData == null || updateData.getNewState() == null) { throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "New state for subscription must be provided"); } // plausi check on topic / subscription combination subscriptionService.getSubscriptionsForTopic(environmentId, topicName, true).stream() 
.filter(s -> s.getId().equals(subscriptionId)).findAny().orElseThrow(notFound); TopicMetadata topicMetadata = topicService.getTopic(environmentId, topicName).orElseThrow(notFound); // user must be authorized for Topic Owner application, not subscribing application! if (!applicationsService.isUserAuthorizedFor(topicMetadata.getOwnerApplicationId())) { throw new ResponseStatusException(HttpStatus.FORBIDDEN); } // Topic must have flag "subscriptionApprovalRequired", otherwise, update is not allowed! if (!topicMetadata.isSubscriptionApprovalRequired()) { throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "Subscription state cannot be updated for topics which do not require subscription approval"); } try { subscriptionService.updateSubscriptionState(environmentId, subscriptionId, updateData.getNewState()).get(); } catch (ExecutionException e) { throw handleExecutionException(e); } } @DeleteMapping(value = "/api/applications/{applicationId}/subscriptions/{environmentId}/{subscriptionId}") public void deleteApplicationSubscription(@PathVariable String applicationId, @PathVariable String environmentId, @PathVariable String subscriptionId) { if (!applicationsService.isUserAuthorizedFor(applicationId)) { throw new ResponseStatusException(HttpStatus.FORBIDDEN); } KafkaEnvironmentConfig environmentMeta = kafkaEnvironments.getEnvironmentMetadata(environmentId) .orElseThrow(notFound); if (environmentMeta.isStagingOnly()) { throw new ResponseStatusException(HttpStatus.FORBIDDEN); } try { subscriptionService.deleteSubscription(environmentId, subscriptionId).get(); } catch (ExecutionException e) { throw handleExecutionException(e); } catch (InterruptedException e) { return; } } @GetMapping(value = "/api/topics/{environmentId}/{topicName}/subscriptions", produces = MediaType.APPLICATION_JSON_VALUE) public List<SubscriptionDto> getTopicSubscriptions(@PathVariable String environmentId, @PathVariable String topicName, @RequestParam(defaultValue = "false") boolean 
includeNonApproved) { kafkaEnvironments.getEnvironmentMetadata(environmentId).orElseThrow(notFound); topicService.getTopic(environmentId, topicName).orElseThrow(notFound); return subscriptionService.getSubscriptionsForTopic(environmentId, topicName, includeNonApproved).stream() .map(sub -> toDto(environmentId, sub)).collect(Collectors.toList()); } private SubscriptionDto toDto(String environmentId, SubscriptionMetadata subscription) { return new SubscriptionDto(subscription.getId(), subscription.getTopicName(), environmentId, subscription.getClientApplicationId(), subscription.getState(), subscription.getDescription()); } private ResponseStatusException handleExecutionException(ExecutionException e) { Throwable t = e.getCause(); if (t instanceof IllegalArgumentException || t instanceof IllegalAccessException) { return new ResponseStatusException(HttpStatus.BAD_REQUEST, t.getMessage()); } if (t instanceof NoSuchElementException) { return new ResponseStatusException(HttpStatus.NOT_FOUND, t.getMessage()); } log.error("Unhandled exception when processing request", e); return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR); } }
8,108
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
UpdateSubscriptionDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/controller/UpdateSubscriptionDto.java
package com.hermesworld.ais.galapagos.subscriptions.controller; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState; import lombok.Getter; import lombok.Setter; @JsonSerialize @Getter @Setter public class UpdateSubscriptionDto { private SubscriptionState newState; }
357
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/service/SubscriptionService.java
package com.hermesworld.ais.galapagos.subscriptions.service; import java.util.List; import java.util.concurrent.CompletableFuture; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState; public interface SubscriptionService { CompletableFuture<SubscriptionMetadata> addSubscription(String environmentId, String topicName, String applicationId, String description); /** * Adds a subscription to a given environment. This method does apply less business logic than * {@link #addSubscription(String, String, String, String)}, but still generates a <b>new</b> Subscription Metadata * object with a new ID. This method is useful for staging, where some business rules, e.g. regarding protected * topics, shall be bypassed. Contrary to the mentioned other method, this method copies the current state of the * given metadata object into the new object. * * @param environmentId ID of the Kafka Environment to create the subscription on. * @param subscription Metadata of an existing subscription (usually from a different environment). Its * <code>id</code> property is <b>not</b> used when creating the new subscription. Other fields * are copied. * @return A Completable Future which completes when the subscription has been added successfully and returns the * newly created subscription metadata, or which fails in case of any error. */ CompletableFuture<SubscriptionMetadata> addSubscription(String environmentId, SubscriptionMetadata subscription); CompletableFuture<Void> deleteSubscription(String environmentId, String subscriptionId); List<SubscriptionMetadata> getSubscriptionsForTopic(String environmentId, String topicName, boolean includeNonApproved); List<SubscriptionMetadata> getSubscriptionsOfApplication(String environmentId, String applicationId, boolean includeNonApproved); CompletableFuture<Void> updateSubscriptionState(String environmentId, String subscriptionId, SubscriptionState newState); }
2,202
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionTopicListener.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/service/impl/SubscriptionTopicListener.java
package com.hermesworld.ais.galapagos.subscriptions.service.impl; import com.hermesworld.ais.galapagos.events.*; import com.hermesworld.ais.galapagos.kafka.KafkaCluster; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState; import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService; import com.hermesworld.ais.galapagos.util.FutureUtil; import org.springframework.stereotype.Component; import java.util.concurrent.CompletableFuture; /** * Listener which performs several updates when topic events occur: * <ul> * <li>When a topic is deleted, all associated subscriptions are deleted. Note that this can only occur for deprecated * topics after their "end of life" date; otherwise, the <code>TopicService</code> would prohibit deleting topics with * active subscriptions.</li> * <li>Updates <code>PENDING</code> subscriptions to {@link SubscriptionState#APPROVED} when the flag * <code>subscriptionApprovalRequired</code> of the associated topic is updated.</li> * </ul> * * @author AlbrechtFlo * */ @Component public class SubscriptionTopicListener implements TopicEventsListener { private final SubscriptionService subscriptionService; public SubscriptionTopicListener(SubscriptionService subscriptionService) { this.subscriptionService = subscriptionService; } @Override public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) { KafkaCluster cluster = event.getContext().getKafkaCluster(); CompletableFuture<Void> result = FutureUtil.noop(); for (SubscriptionMetadata subscription : subscriptionService.getSubscriptionsForTopic(cluster.getId(), event.getMetadata().getName(), true)) { result = result .thenCompose(o -> subscriptionService.deleteSubscription(cluster.getId(), subscription.getId())); } return result; } @Override public CompletableFuture<Void> 
handleTopicDescriptionChanged(TopicEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) { KafkaCluster cluster = event.getContext().getKafkaCluster(); CompletableFuture<Void> result = FutureUtil.noop(); for (SubscriptionMetadata subscription : subscriptionService.getSubscriptionsForTopic(cluster.getId(), event.getMetadata().getName(), true)) { if (subscription.getState() == SubscriptionState.PENDING) { result = subscriptionService.updateSubscriptionState(cluster.getId(), subscription.getId(), SubscriptionState.APPROVED); } } return result; } @Override public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) { return FutureUtil.noop(); } @Override public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) { return FutureUtil.noop(); } }
3,960
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionServiceImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/subscriptions/service/impl/SubscriptionServiceImpl.java
package com.hermesworld.ais.galapagos.subscriptions.service.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.events.GalapagosEventManager;
import com.hermesworld.ais.galapagos.events.GalapagosEventSink;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.util.InitPerCluster;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import java.util.stream.Collectors;

/**
 * Default implementation of {@link SubscriptionService}. Subscriptions are stored per Kafka cluster in a
 * topic-based repository (metadata topic "subscriptions"), and every mutation fires a corresponding event via the
 * {@link GalapagosEventManager} so that e.g. ACL updates can be performed by listeners.
 */
@Component
public class SubscriptionServiceImpl implements SubscriptionService, InitPerCluster {

    private final KafkaClusters kafkaEnvironments;

    private final ApplicationsService applicationsService;

    // "nonvalidating" variant is injected to avoid circular validation logic between topics and subscriptions.
    private final TopicService topicService;

    private final GalapagosEventManager eventManager;

    // Name of the metadata topic backing the subscription repository on each cluster.
    private static final String TOPIC_NAME = "subscriptions";

    public SubscriptionServiceImpl(KafkaClusters kafkaEnvironments, ApplicationsService applicationsService,
            @Qualifier(value = "nonvalidating") TopicService topicService, GalapagosEventManager eventManager) {
        this.kafkaEnvironments = kafkaEnvironments;
        this.applicationsService = applicationsService;
        this.topicService = topicService;
        this.eventManager = eventManager;
    }

    /**
     * Pre-loads the subscription repository of the given cluster (result intentionally discarded; this only warms
     * the repository cache).
     */
    @Override
    public void init(KafkaCluster cluster) {
        getRepository(cluster).getObjects();
    }

    /**
     * Adds a subscription based on the given metadata. Validates that the environment, topic, and the subscribing
     * application's authentication exist, that the topic is not INTERNAL, and that no subscription of this topic
     * for this application exists yet. A fresh ID is always generated; the passed object is copied, not stored.
     *
     * @return future completing with the stored subscription, or failing with {@link NoSuchElementException} /
     *         {@link IllegalArgumentException} on validation errors.
     */
    @Override
    public CompletableFuture<SubscriptionMetadata> addSubscription(String environmentId,
            SubscriptionMetadata subscriptionMetadata) {
        KafkaCluster kafkaCluster = kafkaEnvironments.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return noSuchEnvironment(environmentId);
        }

        TopicMetadata topic = topicService.getTopic(environmentId, subscriptionMetadata.getTopicName()).orElse(null);
        if (topic == null) {
            return noSuchTopic(environmentId, subscriptionMetadata.getTopicName());
        }

        // existence check only - the metadata itself is not used afterwards
        ApplicationMetadata application = applicationsService
                .getApplicationMetadata(environmentId, subscriptionMetadata.getClientApplicationId()).orElse(null);
        if (application == null) {
            return CompletableFuture.failedFuture(new NoSuchElementException(
                    "Application not registered on this environment. Please create authentication data first."));
        }

        if (topic.getType() == TopicType.INTERNAL) {
            return CompletableFuture
                    .failedFuture(new IllegalArgumentException("Cannot subscribe to application internal topics"));
        }

        // reject duplicates, including non-approved (e.g. still PENDING) subscriptions
        List<SubscriptionMetadata> subscriptionsForTopic = getSubscriptionsForTopic(environmentId,
                subscriptionMetadata.getTopicName(), true);

        for (var subscription : subscriptionsForTopic) {
            if (Objects.equals(subscription.getClientApplicationId(), subscriptionMetadata.getClientApplicationId())) {
                return CompletableFuture.failedFuture(new IllegalArgumentException(
                        "A subscription of this topic for this application already exists."));
            }
        }

        // defensive copy with a newly generated ID
        SubscriptionMetadata subscription = new SubscriptionMetadata();
        subscription.setId(UUID.randomUUID().toString());
        subscription.setTopicName(subscriptionMetadata.getTopicName());
        subscription.setClientApplicationId(subscriptionMetadata.getClientApplicationId());
        subscription.setState(subscriptionMetadata.getState());
        subscription.setDescription(subscriptionMetadata.getDescription());

        GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);

        // save first, then notify listeners; resolve with the stored copy
        return getRepository(kafkaCluster).save(subscription)
                .thenCompose(o -> eventSink.handleSubscriptionCreated(subscription)).thenApply(o -> subscription);
    }

    /**
     * Convenience overload: builds the metadata object and derives the initial state from the topic's
     * <code>subscriptionApprovalRequired</code> flag (PENDING if approval is required, APPROVED otherwise), then
     * delegates to {@link #addSubscription(String, SubscriptionMetadata)} for full validation.
     */
    @Override
    public CompletableFuture<SubscriptionMetadata> addSubscription(String environmentId, String topicName,
            String applicationId, String description) {
        KafkaCluster kafkaCluster = kafkaEnvironments.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return noSuchEnvironment(environmentId);
        }

        TopicMetadata topic = topicService.getTopic(environmentId, topicName).orElse(null);
        if (topic == null) {
            return noSuchTopic(environmentId, topicName);
        }

        SubscriptionMetadata subscription = new SubscriptionMetadata();
        subscription.setId(UUID.randomUUID().toString());
        subscription.setTopicName(topicName);
        subscription.setClientApplicationId(applicationId);
        subscription.setState(
                topic.isSubscriptionApprovalRequired() ? SubscriptionState.PENDING : SubscriptionState.APPROVED);
        subscription.setDescription(description);

        return addSubscription(environmentId, subscription);
    }

    /**
     * Updates the state of a subscription. No-op if the state is unchanged. REJECTED and CANCELED subscriptions are
     * deleted from the repository (instead of saved) so the application can re-subscribe later; listeners are
     * notified via handleSubscriptionUpdated in all cases.
     */
    @Override
    public CompletableFuture<Void> updateSubscriptionState(String environmentId, String subscriptionId,
            SubscriptionState newState) {
        return doWithClusterAndSubscription(environmentId, subscriptionId, (kafkaCluster, subscription) -> {
            SubscriptionState state = newState;
            if (state.equals(subscription.getState())) {
                return FutureUtil.noop();
            }

            // REJECTED is only possible for PENDING subscriptions. Change to CANCELED automatically for APPROVEDs.
            if (state == SubscriptionState.REJECTED && subscription.getState() == SubscriptionState.APPROVED) {
                state = SubscriptionState.CANCELED;
            }

            subscription.setState(state);
            GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);

            // REJECTED and CANCELED subscriptions will instantly be deleted, to allow re-submission
            return (state == SubscriptionState.REJECTED || state == SubscriptionState.CANCELED
                    ? getRepository(kafkaCluster).delete(subscription)
                    : getRepository(kafkaCluster).save(subscription))
                            .thenCompose(o -> eventSink.handleSubscriptionUpdated(subscription));
        });
    }

    /**
     * Deletes a subscription and notifies listeners afterwards. Fails with {@link NoSuchElementException} if
     * environment or subscription are unknown.
     */
    @Override
    public CompletableFuture<Void> deleteSubscription(String environmentId, String subscriptionId) {
        return doWithClusterAndSubscription(environmentId, subscriptionId, (kafkaCluster, subscription) -> {
            GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);
            return getRepository(kafkaCluster).delete(subscription)
                    .thenCompose(o -> eventSink.handleSubscriptionDeleted(subscription));
        });
    }

    /**
     * Returns all subscriptions of the given topic on the given environment; empty list for unknown environments.
     *
     * @param includeNonApproved if {@code false}, only APPROVED subscriptions are returned.
     */
    @Override
    public List<SubscriptionMetadata> getSubscriptionsForTopic(String environmentId, String topicName,
            boolean includeNonApproved) {
        Predicate<SubscriptionMetadata> inclusionFilter = inclusionFilter(includeNonApproved);

        return kafkaEnvironments.getEnvironment(environmentId)
                .map(cluster -> getRepository(cluster).getObjects().stream()
                        .filter(s -> topicName.equals(s.getTopicName())).filter(inclusionFilter)
                        .collect(Collectors.toList()))
                .orElse(Collections.emptyList());
    }

    /**
     * Returns all subscriptions held by the given application on the given environment; empty list for unknown
     * environments.
     *
     * @param includeNonApproved if {@code false}, only APPROVED subscriptions are returned.
     */
    @Override
    public List<SubscriptionMetadata> getSubscriptionsOfApplication(String environmentId, String applicationId,
            boolean includeNonApproved) {
        Predicate<SubscriptionMetadata> inclusionFilter = inclusionFilter(includeNonApproved);

        return kafkaEnvironments.getEnvironment(environmentId)
                .map(cluster -> getRepository(cluster).getObjects().stream()
                        .filter(s -> applicationId.equals(s.getClientApplicationId())).filter(inclusionFilter)
                        .collect(Collectors.toList()))
                .orElse(Collections.emptyList());
    }

    // Per-cluster repository for subscription metadata, backed by the "subscriptions" metadata topic.
    private TopicBasedRepository<SubscriptionMetadata> getRepository(KafkaCluster kafkaCluster) {
        return kafkaCluster.getRepository(TOPIC_NAME, SubscriptionMetadata.class);
    }

    /**
     * Resolves cluster and subscription and runs the given task with both, or fails the returned future with
     * {@link NoSuchElementException} if either cannot be found. Centralizes the lookup boilerplate of
     * update/delete operations.
     */
    private <T> CompletableFuture<T> doWithClusterAndSubscription(String environmentId, String subscriptionId,
            BiFunction<KafkaCluster, SubscriptionMetadata, CompletableFuture<T>> task) {
        KafkaCluster kafkaCluster = kafkaEnvironments.getEnvironment(environmentId).orElse(null);
        if (kafkaCluster == null) {
            return noSuchEnvironment(environmentId);
        }

        SubscriptionMetadata subscription = getRepository(kafkaCluster).getObject(subscriptionId).orElse(null);
        if (subscription == null) {
            return CompletableFuture.failedFuture(new NoSuchElementException());
        }

        return task.apply(kafkaCluster, subscription);
    }

    private static <T> CompletableFuture<T> noSuchEnvironment(String environmentId) {
        return CompletableFuture
                .failedFuture(new NoSuchElementException("No environment with ID " + environmentId + " found."));
    }

    private static <T> CompletableFuture<T> noSuchTopic(String environmentId, String topicName) {
        return CompletableFuture.failedFuture(new NoSuchElementException(
                "No topic with name " + topicName + " found on environment " + environmentId + "."));
    }

    // Filter used by the two query methods: either pass everything, or only APPROVED subscriptions.
    private static Predicate<SubscriptionMetadata> inclusionFilter(boolean includeNonApproved) {
        return includeNonApproved ? s -> true : s -> s.getState() == SubscriptionState.APPROVED;
    }
}
10,417
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AdminJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/AdminJob.java
package com.hermesworld.ais.galapagos.adminjobs;

import org.springframework.boot.ApplicationArguments;

/**
 * Interface for admin jobs which are run via command line, but require full Spring &amp; Galapagos stack (especially Kafka
 * connections) to start up. <br>
 * To run an admin job, invoke Galapagos with the special parameter <code>--galapagos.jobs.<i>&lt;jobName></i></code>,
 * e.g. <code>--galapagos.jobs.import-known-applications</code>. You can find the name of each job in the subclasses of
 * this interface. Required and optional additional parameters are also documented in the subclasses. <br>
 * This pattern follows <a href="https://12factor.net/admin-processes">principle #12</a> of <i>twelve-factor apps</i>,
 * delegating one-off or scripted admin jobs to separate process instances, but in an identical environment and with the
 * same configuration as the production application.
 *
 * @author AlbrechtFlo
 *
 */
public interface AdminJob {

    /**
     * Returns the name of this job, as used in the <code>--galapagos.jobs.<i>&lt;jobName></i></code> command line
     * parameter to select it.
     */
    String getJobName();

    /**
     * Executes this job.
     *
     * @param allArguments all command line arguments Galapagos was started with; implementations extract their own
     *                     job-specific options from these.
     *
     * @throws Exception if the job fails for any reason.
     */
    void run(ApplicationArguments allArguments) throws Exception;
}
1,059
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ToolingUser.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/ToolingUser.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import org.apache.kafka.common.acl.AclBinding;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.util.StringUtils;

import java.util.Collection;

/**
 * {@link KafkaUser} backed by the stored application metadata of the Galapagos "tooling" application. The Kafka user
 * name is extracted from the application's stored authentication JSON via the environment's authentication module,
 * and the required ACL bindings are calculated by the shared {@link AclSupport} logic.
 */
class ToolingUser implements KafkaUser {

    private final ApplicationMetadata metadata;

    private final String environmentId;

    private final KafkaAuthenticationModule authenticationModule;

    private final AclSupport aclSupport;

    public ToolingUser(ApplicationMetadata metadata, String environmentId,
            KafkaAuthenticationModule authenticationModule, AclSupport aclSupport) {
        this.metadata = metadata;
        this.environmentId = environmentId;
        this.authenticationModule = authenticationModule;
        this.aclSupport = aclSupport;
    }

    /**
     * Extracts the Kafka user name from the application's stored authentication JSON.
     *
     * @throws JSONException if no authentication JSON is stored for the application.
     */
    @Override
    public String getKafkaUserName() {
        String authenticationJson = metadata.getAuthenticationJson();
        if (StringUtils.hasLength(authenticationJson)) {
            return authenticationModule.extractKafkaUserName(new JSONObject(authenticationJson));
        }
        throw new JSONException("No authentication JSON stored for application " + metadata.getApplicationId());
    }

    /**
     * Calculates the ACL bindings this tooling user needs on the associated environment (non-developer ACLs).
     */
    @Override
    public Collection<AclBinding> getRequiredAclBindings() {
        String userName = getKafkaUserName();
        return aclSupport.getRequiredAclBindings(environmentId, metadata, userName, false);
    }
}
1,631
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ViewAclsJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/ViewAclsJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import com.hermesworld.ais.galapagos.kafka.KafkaCluster; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import org.apache.kafka.common.acl.AclBinding; import org.json.JSONArray; import org.json.JSONObject; import org.springframework.boot.ApplicationArguments; import org.springframework.stereotype.Component; /** * Admin job to retrieve ALL ACL entries of a given Kafka Cluster, in JSON format. <br> * Note that the result is <b>not</b> limited to ACL entries generated by Galapagos. <br> * The job requires one parameter: * <ul> * <li><code>--kafka.environment=<i>&lt;id></i> - The ID of the Kafka Environment to return the ACL entries of, as * configured for Galapagos.</li> * </ul> * * @author AlbrechtFlo * */ @Component public class ViewAclsJob extends SingleClusterAdminJob { public ViewAclsJob(KafkaClusters kafkaClusters) { super(kafkaClusters); } @Override public String getJobName() { return "view-acls"; } @Override public void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception { JSONArray acls = new JSONArray(); cluster.visitAcls(acl -> { acls.put(toJsonObject(acl)); return true; }).get(); System.out.println(); System.out.println(); System.out.println(acls.length() + " ACLs found:"); System.out.println(acls); System.out.println(); System.out.println(); } private JSONObject toJsonObject(AclBinding aclBinding) { JSONObject pattern = new JSONObject(); pattern.put("resourceType", aclBinding.pattern().resourceType().toString()); pattern.put("patternType", aclBinding.pattern().patternType().toString()); pattern.put("name", aclBinding.pattern().name()); JSONObject entry = new JSONObject(); entry.put("principal", aclBinding.entry().principal()); entry.put("host", aclBinding.entry().host()); entry.put("operation", aclBinding.entry().operation().toString()); entry.put("permissionType", aclBinding.entry().permissionType().toString()); JSONObject result = new JSONObject(); 
result.put("pattern", pattern); result.put("entry", entry); return result; } }
2,350
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
DeleteAclsJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/DeleteAclsJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import java.util.Collection;
import java.util.Collections;
import java.util.Optional;

import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import org.apache.kafka.common.acl.AclBinding;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;
import org.springframework.util.ObjectUtils;

/**
 * Admin job to explicitly delete ACLs from a Kafka Cluster. This job is useful if something went terribly wrong with
 * Galapagos, or if some rights have to be revoked quickly. <br>
 * It can also be used to remove the ACLs previously generated with the {@link GenerateToolingCertificateJob}. <br>
 * The job requires two parameters:
 * <ul>
 * <li><code>--certificate.dn=<i>&lt;dn></i> - The Distinguished Name of the certificate to remove the associated ACLs
 * of.</li>
 * <li><code>--kafka.environment=<i>&lt;id></i> - The ID of the Kafka Environment to operate on, as configured for
 * Galapagos.</li>
 * </ul>
 *
 * @author AlbrechtFlo
 *
 */
@Component
public class DeleteAclsJob extends SingleClusterAdminJob {

    public DeleteAclsJob(KafkaClusters kafkaClusters) {
        super(kafkaClusters);
    }

    @Override
    public String getJobName() {
        return "delete-acls";
    }

    /**
     * Removes all ACLs of the user identified by the --certificate.dn parameter from the cluster and prints a
     * confirmation banner.
     *
     * @throws IllegalArgumentException if the --certificate.dn parameter is missing or empty.
     */
    @Override
    public void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception {
        // first value of --certificate.dn, or null if the option is absent or has no values
        String certificateDn = Optional.ofNullable(allArguments.getOptionValues("certificate.dn"))
                .map(values -> values.isEmpty() ? null : values.get(0)).orElse(null);
        if (ObjectUtils.isEmpty(certificateDn)) {
            throw new IllegalArgumentException("Please provide --certificate.dn=<dn> for DN of certificate.");
        }

        // a throwaway KafkaUser is enough here - only the user name matters for ACL removal
        cluster.removeUserAcls(new DummyKafkaUser(certificateDn)).get();

        System.out.println();
        System.out.println("========================== Certificate ACLs DELETED ==========================");
        System.out.println();
        System.out.println("All ACLs for certificate " + certificateDn + " have been deleted on Kafka Environment "
                + cluster.getId());
        System.out.println();
        System.out.println("==============================================================================");
    }

    /**
     * Minimal {@link KafkaUser} carrying only a DN; used to address the ACLs to delete without requiring stored
     * application metadata.
     */
    private static class DummyKafkaUser implements KafkaUser {

        private final String dn;

        public DummyKafkaUser(String dn) {
            this.dn = dn;
        }

        @Override
        public String getKafkaUserName() {
            return "User:" + dn;
        }

        @Override
        public Collection<AclBinding> getRequiredAclBindings() {
            return Collections.emptyList();
        }
    }
}
2,842
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GenerateToolingCertificateJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/GenerateToolingCertificateJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.applications.impl.KnownApplicationImpl;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentsConfig;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.util.CertificateUtil;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.json.JSONObject;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;
import org.springframework.util.ObjectUtils;

import java.io.ByteArrayInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.KeyPair;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;
import java.util.List;
import java.util.Map;
import java.util.Optional;

/**
 * Admin job to generate a "tooling" certificate for one of the Kafka Clusters configured for Galapagos. <br>
 * Such "tooling" certificate is e.g. required for operating the Galapagos LeanIX synchronizer microservice. <br>
 * The job has two parameters:
 * <ul>
 * <li><code>--output.filename=<i>&lt;p12-file></i> - The name of a file to receive the generated PKCS12 keystore. If
 * not given, the PKCS12 data is written to STDOUT, Base64 encoded.</li>
 * <li><code>--kafka.environment=<i>&lt;id></i> - The ID of the Kafka Environment to generate the certificate for, as
 * configured for Galapagos.</li>
 * </ul>
 * Note that this job can only generate certificates for environments using <code>certificates</code> authentication
 * mode.
 *
 * @author AlbrechtFlo
 *
 */
@Component
public class GenerateToolingCertificateJob extends SingleClusterAdminJob {

    private final AclSupport aclSupport;

    private final NamingService namingService;

    private final KafkaEnvironmentsConfig kafkaConfig;

    public GenerateToolingCertificateJob(KafkaClusters kafkaClusters, AclSupport aclSupport,
            NamingService namingService, KafkaEnvironmentsConfig kafkaConfig) {
        super(kafkaClusters);
        this.aclSupport = aclSupport;
        this.namingService = namingService;
        this.kafkaConfig = kafkaConfig;
    }

    @Override
    public String getJobName() {
        return "generate-galapagos-tooling-certificate";
    }

    /**
     * Generates the tooling certificate: builds a CSR locally, has the environment's authentication module sign it,
     * packs certificate + private key into a PKCS12 keystore (password "changeit"), registers the required ACLs for
     * the tooling user on the cluster, and finally writes the keystore either to --output.filename or Base64-encoded
     * to STDOUT, followed by a usage summary.
     *
     * @throws IllegalStateException    if the environment does not use "certificates" authentication mode.
     * @throws IllegalArgumentException if the given output file cannot be written.
     */
    @Override
    public void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception {
        // optional parameter; if absent, keystore data goes to STDOUT (Base64) instead of a file
        String outputFilename = Optional.ofNullable(allArguments.getOptionValues("output.filename"))
                .flatMap(ls -> ls.stream().findFirst()).orElse(null);

        KafkaEnvironmentConfig metadata = kafkaClusters.getEnvironmentMetadata(cluster.getId()).orElseThrow();
        if (!"certificates".equals(metadata.getAuthenticationMode())) {
            throw new IllegalStateException("Environment " + cluster.getId()
                    + " does not use certificates for authentication. Cannot generate tooling certificate.");
        }

        // fail fast: verify the output file is writable BEFORE doing any expensive/irreversible work
        if (!ObjectUtils.isEmpty(outputFilename)) {
            try {
                new FileOutputStream(outputFilename).close();
            }
            catch (IOException e) {
                throw new IllegalArgumentException("Cannot write output file " + outputFilename);
            }
        }

        KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(cluster.getId()).orElseThrow();

        // Create a CSR on the fly to enable certificate generation also on PROD environments
        KeyPair keyPair = CertificateUtil.generateKeyPair();
        X500Name name = CertificateUtil.uniqueX500Name("galapagos");
        PKCS10CertificationRequest request = CertificateUtil.buildCsr(name, keyPair);
        String csrData = CertificateUtil.toPemString(request);

        CreateAuthenticationResult result = authModule.createApplicationAuthentication("galapagos", "galapagos",
                new JSONObject(Map.of("generateKey", false, "csrData", csrData))).get();

        // build P12 file from certificate and private key
        // NOTE(review): getPrivateAuthenticationData() is parsed as the X.509 certificate here - presumably it
        // carries the signed certificate bytes in this flow; confirm against the authentication module.
        CertificateFactory certFactory = CertificateFactory.getInstance("X.509");
        X509Certificate cert = (X509Certificate) certFactory
                .generateCertificate(new ByteArrayInputStream(result.getPrivateAuthenticationData()));
        byte[] p12Data = CertificateUtil.buildPrivateKeyStore(cert, keyPair.getPrivate(), "changeit".toCharArray());

        // assemble metadata for the pseudo-application "galapagos_tooling" to derive its ACLs
        ApplicationMetadata toolMetadata = new ApplicationMetadata();
        toolMetadata.setApplicationId("galapagos_tooling");
        toolMetadata.setAuthenticationJson(result.getPublicAuthenticationData().toString());
        KnownApplication dummyApp = new KnownApplicationImpl("galapagos", "Galapagos");

        ApplicationPrefixes prefixes = namingService.getAllowedPrefixes(dummyApp);

        // intentionally use config value here - could differ e.g. for Galapagos test instance on same cluster
        toolMetadata.setInternalTopicPrefixes(List.of(kafkaConfig.getMetadataTopicsPrefix()));
        toolMetadata.setConsumerGroupPrefixes(prefixes.getConsumerGroupPrefixes());
        toolMetadata.setTransactionIdPrefixes(prefixes.getTransactionIdPrefixes());

        // register the ACLs on the cluster before handing out the keystore
        cluster.updateUserAcls(new ToolingUser(toolMetadata, cluster.getId(), authModule, aclSupport)).get();

        if (!ObjectUtils.isEmpty(outputFilename)) {
            try (FileOutputStream fos = new FileOutputStream(outputFilename)) {
                fos.write(p12Data);
            }
        }
        else {
            String base64Data = Base64.getEncoder().encodeToString(p12Data);
            System.out.println("CERTIFICATE DATA: " + base64Data);
        }

        System.out.println();
        System.out.println("==================== Galapagos Tooling Certificate CREATED ====================");
        System.out.println();
        if (!ObjectUtils.isEmpty(outputFilename)) {
            System.out.println("You can now use the certificate in " + outputFilename
                    + " for Galapagos external tooling on " + metadata.getName());
        }
        else {
            System.out.println(
                    "You can now use the certificate (which is encoded above) for Galapagos external tooling on "
                            + metadata.getName());
        }
        System.out.println();
        System.out.println("Use bootstrap servers " + metadata.getBootstrapServers());
        System.out.println();
        System.out.println("The following Kafka prefixes can be used and accessed, using the certificate: ");
        System.out.println();
        System.out.println("Internal Topics: " + toolMetadata.getInternalTopicPrefixes().get(0) + "*");
        System.out.println("Consumer Groups: " + toolMetadata.getConsumerGroupPrefixes().get(0) + "*");
        System.out.println("Transactional IDs: " + toolMetadata.getTransactionIdPrefixes().get(0) + "*");
        System.out.println();
        System.out.println();
        System.out.println("The certificate expires at " + cert.getNotAfter());
        System.out.println();
        System.out.println("To remove ACLs for this certificate, run Galapagos admin task galapagos.jobs.delete-acls");
        System.out.println("with --certificate.dn=" + result.getPublicAuthenticationData().getString("dn")
                + " --kafka.environment=" + cluster.getId());
        System.out.println();
        System.out.println("==============================================================================");
    }
}
8,148
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
UpdateConfluentAuthMetadataJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/UpdateConfluentAuthMetadataJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.hermesworld.ais.galapagos.adminjobs.AdminJob;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthenticationModule;
import com.hermesworld.ais.galapagos.devauth.DevAuthenticationMetadata;
import com.hermesworld.ais.galapagos.devauth.DeveloperAuthenticationService;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import org.json.JSONObject;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;

import java.util.List;

/**
 * Admin job ("update-confluent-auth-metadata") which upgrades the stored authentication JSON of all applications and
 * all developer authentications on every Confluent Cloud based environment, by delegating to
 * {@link ConfluentCloudAuthenticationModule#upgradeAuthMetadata}. Entries whose upgraded JSON is unchanged are left
 * untouched; upgraded entries are saved back to the respective metadata repository.
 */
@Component
public class UpdateConfluentAuthMetadataJob implements AdminJob {

    private final KafkaClusters kafkaClusters;

    private final ApplicationsService applicationsService;

    private final DeveloperAuthenticationService devAuthService;

    public UpdateConfluentAuthMetadataJob(KafkaClusters kafkaClusters, ApplicationsService applicationsService,
            DeveloperAuthenticationService devAuthService) {
        this.kafkaClusters = kafkaClusters;
        this.applicationsService = applicationsService;
        this.devAuthService = devAuthService;
    }

    @Override
    public String getJobName() {
        return "update-confluent-auth-metadata";
    }

    /**
     * Iterates over all environments; for each Confluent Cloud environment, upgrades the auth metadata of every
     * application and every developer authentication. Saves (and logs to STDOUT) only entries that actually changed.
     *
     * @throws Exception if any upgrade or repository save fails (futures are awaited synchronously via .get()).
     */
    @Override
    public void run(ApplicationArguments allArguments) throws Exception {
        for (String environmentId : kafkaClusters.getEnvironmentIds()) {
            // only Confluent Cloud environments are affected by this migration
            KafkaAuthenticationModule authenticationModule = kafkaClusters.getAuthenticationModule(environmentId)
                    .orElse(null);
            if (!(authenticationModule instanceof ConfluentCloudAuthenticationModule)) {
                continue;
            }

            KafkaCluster cluster = kafkaClusters.getEnvironment(environmentId).orElseThrow();
            TopicBasedRepository<ApplicationMetadata> appMetadataRepo = cluster.getRepository("application-metadata",
                    ApplicationMetadata.class);
            TopicBasedRepository<DevAuthenticationMetadata> devAuthRepo = cluster.getRepository("devauth",
                    DevAuthenticationMetadata.class);

            ConfluentCloudAuthenticationModule confluentCloudAuthenticationModule = (ConfluentCloudAuthenticationModule) authenticationModule;

            // pass 1: upgrade stored application authentications
            List<ApplicationMetadata> allApplicationMetadata = applicationsService
                    .getAllApplicationMetadata(environmentId);
            for (ApplicationMetadata app : allApplicationMetadata) {
                JSONObject authenticationJson = new JSONObject(app.getAuthenticationJson());
                JSONObject newAuthJson = confluentCloudAuthenticationModule.upgradeAuthMetadata(authenticationJson)
                        .get();
                // string comparison of the serialized JSON decides whether a save is needed
                if (!newAuthJson.toString().equals(authenticationJson.toString())) {
                    System.out.println("Upgrading authentication for app " + app.getApplicationId());
                    app.setAuthenticationJson(newAuthJson.toString());
                    appMetadataRepo.save(app).get();
                }
            }

            // pass 2: upgrade stored developer authentications, same pattern as above
            List<DevAuthenticationMetadata> allDevAuth = devAuthService.getAllDeveloperAuthentications(environmentId);
            for (DevAuthenticationMetadata auth : allDevAuth) {
                JSONObject authenticationJson = new JSONObject(auth.getAuthenticationJson());
                JSONObject newAuthJson = confluentCloudAuthenticationModule.upgradeAuthMetadata(authenticationJson)
                        .get();
                if (!newAuthJson.toString().equals(authenticationJson.toString())) {
                    System.out.println("Upgrading authentication for developer " + auth.getUserName());
                    auth.setAuthenticationJson(newAuthJson.toString());
                    devAuthRepo.save(auth).get();
                }
            }
        }
    }
}
4,180
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
NoUpdatesAdminClient.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/NoUpdatesAdminClient.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.hermesworld.ais.galapagos.kafka.KafkaClusterAdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Collection;
import java.util.Map;

/**
 * Helper class used by admin jobs providing a "dry run". This class wraps an existing Kafka AdminClient and throws an
 * <code>UnsupportedOperationException</code> whenever a method is called which would modify something in the Kafka
 * cluster. Admin jobs subclass this class and override the calls they are interested in.
 */
public abstract class NoUpdatesAdminClient implements KafkaClusterAdminClient {

    // the real admin client; only read operations are forwarded to it
    private final KafkaClusterAdminClient delegate;

    public NoUpdatesAdminClient(KafkaClusterAdminClient delegate) {
        this.delegate = delegate;
    }

    // --- mutating operation: blocked in dry-run mode ---
    @Override
    public KafkaFuture<Void> createTopic(NewTopic topic) {
        throw new UnsupportedOperationException();
    }

    // --- mutating operation: blocked in dry-run mode ---
    @Override
    public KafkaFuture<Void> deleteTopic(String topicName) {
        throw new UnsupportedOperationException();
    }

    // --- read-only operation: forwarded to the wrapped client ---
    @Override
    public KafkaFuture<TopicDescription> describeTopic(String topicName) {
        return delegate.describeTopic(topicName);
    }

    // --- read-only operation: forwarded to the wrapped client ---
    @Override
    public KafkaFuture<Collection<Node>> describeCluster() {
        return delegate.describeCluster();
    }

    // --- read-only operation: forwarded to the wrapped client ---
    @Override
    public KafkaFuture<Collection<AclBinding>> describeAcls(AclBindingFilter filter) {
        return delegate.describeAcls(filter);
    }

    // --- mutating operation: blocked in dry-run mode ---
    @Override
    public KafkaFuture<Void> createAcls(Collection<AclBinding> acls) {
        throw new UnsupportedOperationException();
    }

    // --- mutating operation: blocked in dry-run mode ---
    @Override
    public KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters) {
        throw new UnsupportedOperationException();
    }

    // --- read-only operation: forwarded to the wrapped client ---
    @Override
    public KafkaFuture<Config> describeConfigs(ConfigResource resource) {
        return delegate.describeConfigs(resource);
    }

    // --- mutating operation: blocked in dry-run mode ---
    @Override
    public KafkaFuture<Void> incrementalAlterConfigs(ConfigResource resource, Map<String, String> configValues) {
        throw new UnsupportedOperationException();
    }
}
2,472
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GenerateToolingApiKeyJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/GenerateToolingApiKeyJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.applications.impl.KnownApplicationImpl;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentsConfig;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.NamingService;
import org.json.JSONObject;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;

import java.util.List;

/**
 * Admin job to generate a "tooling" API Key for one of the Kafka Clusters configured for Galapagos. <br>
 * Such "tooling" API Key is e.g. required for operating the Galapagos LeanIX synchronizer microservice. <br>
 * The job has one parameter:
 * <ul>
 * <li><code>--kafka.environment=<i>&lt;id></i> - The ID of the Kafka Environment to generate the API Key for, as
 * configured for Galapagos.</li>
 * </ul>
 * Note that this job can only generate API Keys for environments using <code>ccloud</code> authentication mode.
 *
 * @author PolatEmr
 *
 */
@Component
public class GenerateToolingApiKeyJob extends SingleClusterAdminJob {

    // Supplies the allowed consumer-group / transaction-ID prefixes for the synthetic "galapagos" application.
    private final NamingService namingService;

    // Global Kafka config; source of the metadata topics prefix used as the internal topic prefix below.
    private final KafkaEnvironmentsConfig kafkaConfig;

    // Translates application metadata into the concrete ACL set applied via cluster.updateUserAcls().
    private final AclSupport aclSupport;

    public GenerateToolingApiKeyJob(KafkaClusters kafkaClusters, AclSupport aclSupport, NamingService namingService,
            KafkaEnvironmentsConfig kafkaConfig) {
        super(kafkaClusters);
        this.aclSupport = aclSupport;
        this.namingService = namingService;
        this.kafkaConfig = kafkaConfig;
    }

    @Override
    public String getJobName() {
        return "generate-galapagos-tooling-apikey";
    }

    /**
     * Creates the tooling API Key on the given cluster and prints the key, its secret, and the accessible Kafka
     * prefixes to STDOUT. Fails fast if the environment does not use <code>ccloud</code> authentication.
     *
     * @param cluster      the Kafka cluster selected via <code>--kafka.environment</code>
     * @param allArguments all command line arguments (no additional ones are evaluated here)
     * @throws IllegalStateException if the environment's authentication mode is not <code>ccloud</code>
     */
    @Override
    public void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception {
        KafkaEnvironmentConfig metadata = kafkaClusters.getEnvironmentMetadata(cluster.getId()).orElseThrow();
        // Only the Confluent Cloud authentication module creates API Keys; other modes would produce
        // an incompatible authentication artifact.
        if (!"ccloud".equals(metadata.getAuthenticationMode())) {
            throw new IllegalStateException("Environment " + cluster.getId()
                    + " does not use API Keys for authentication. Cannot generate tooling API Key.");
        }

        KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(cluster.getId()).orElseThrow();

        // Create the key under the pseudo application name "galapagos"; blocks until Confluent Cloud responds.
        CreateAuthenticationResult result = authModule
                .createApplicationAuthentication("galapagos", "galapagos", new JSONObject()).get();

        // Synthetic application metadata representing the tooling user; only used to derive ACLs below,
        // not persisted into the regular application repository here.
        ApplicationMetadata toolMetadata = new ApplicationMetadata();
        toolMetadata.setApplicationId("galapagos_tooling");
        toolMetadata.setAuthenticationJson(result.getPublicAuthenticationData().toString());

        // Dummy application only serves to ask the NamingService for the standard prefix set.
        KnownApplication dummyApp = new KnownApplicationImpl("galapagos", "Galapagos");
        ApplicationPrefixes prefixes = namingService.getAllowedPrefixes(dummyApp);

        // intentionally use config value here - could differ e.g. for Galapagos test instance on same cluster
        toolMetadata.setInternalTopicPrefixes(List.of(kafkaConfig.getMetadataTopicsPrefix()));
        toolMetadata.setConsumerGroupPrefixes(prefixes.getConsumerGroupPrefixes());
        toolMetadata.setTransactionIdPrefixes(prefixes.getTransactionIdPrefixes());

        // Apply the ACLs for the new key on the cluster; blocks until done.
        cluster.updateUserAcls(new ToolingUser(toolMetadata, cluster.getId(), authModule, aclSupport)).get();

        System.out.println();

        // The public authentication data contains the API Key under "apiKey"; the secret is only
        // available from the creation result and is printed exactly once here.
        String samlUsername = new JSONObject(toolMetadata.getAuthenticationJson()).getString("apiKey");
        String secret = new String(result.getPrivateAuthenticationData());

        System.out.println("SAML Username: " + samlUsername);
        System.out.println("Secret: " + secret);
        System.out.println();
        System.out.println("==================== Galapagos Tooling API Key CREATED ====================");
        System.out.println();
        System.out.println("You can now use the API Key above for Galapagos external tooling on " + metadata.getName());
        System.out.println();
        System.out.println("Use bootstrap servers " + metadata.getBootstrapServers());
        System.out.println();
        System.out.println("The following Kafka prefixes can be used and accessed, using the API Key: ");
        System.out.println();
        System.out.println("Internal Topics: " + toolMetadata.getInternalTopicPrefixes().get(0) + "*");
        System.out.println("Consumer Groups: " + toolMetadata.getConsumerGroupPrefixes().get(0) + "*");
        System.out.println("Transactional IDs: " + toolMetadata.getTransactionIdPrefixes().get(0) + "*");
        System.out.println();
        System.out.println();
        System.out.println();
        System.out.println(
                "To remove ACLs for this API Key AND to delete the key itself, run Galapagos admin task galapagos.jobs.delete-apikey");
        System.out.println("with --kafka.environment=" + cluster.getId());
        System.out.println();
        System.out.println("==============================================================================");
    }
}
5,574
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ImportKnownApplicationsJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/ImportKnownApplicationsJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.*; import java.util.stream.Collectors; import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.type.TypeFactory; import com.hermesworld.ais.galapagos.adminjobs.AdminJob; import com.hermesworld.ais.galapagos.applications.BusinessCapability; import com.hermesworld.ais.galapagos.applications.impl.KnownApplicationImpl; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository; import com.hermesworld.ais.galapagos.util.JsonUtil; import lombok.extern.slf4j.Slf4j; import org.springframework.boot.ApplicationArguments; import org.springframework.stereotype.Component; import org.springframework.util.ObjectUtils; import org.springframework.util.StreamUtils; /** * Admin job to import known applications from a JSON file (or STDIN) to the global Galapagos topic * <code>known-applications</code>. This job can be used for development or test instances of Galapagos to import test * data, or it can be used on production instances if no direct import on the Kafka topic is possible. <br> * The job requires the parameter <code>--applications.import.file=&lt;file></code>. <i>file</i> can also be the special * value <code>-</code>, in which case the JSON data is read from STDIN. <br> * An optional parameter <code>--remove.missing.applications=true</code> can be used to force removal of applications * which are stored in the internal topic, but cannot be found in the JSON data. By default, these applications are * <b>not</b> removed. <br> * The JSON data must be a valid JSON array. 
Each element of the JSON array must be a JSON Object which can be parsed as * {@link KnownApplicationImpl} instance. * * @author AlbrechtFlo * */ @Component @Slf4j public class ImportKnownApplicationsJob implements AdminJob { private KafkaClusters kafkaClusters; public ImportKnownApplicationsJob(KafkaClusters kafkaClusters) { this.kafkaClusters = kafkaClusters; } @Override public String getJobName() { return "import-known-applications"; } @Override public void run(ApplicationArguments allArguments) throws Exception { String jsonFile = Optional.ofNullable(allArguments.getOptionValues("applications.import.file")) .map(ls -> ls.stream().findFirst().orElse(null)).orElse(null); boolean remove = Optional.ofNullable(allArguments.getOptionValues("remove.missing.applications")) .map(ls -> ls.stream().findFirst().orElse(null)).map(s -> s == null ? false : Boolean.parseBoolean(s)) .orElse(false); if (ObjectUtils.isEmpty(jsonFile)) { throw new IllegalArgumentException("Please provide --applications.import.file=<file> for JSON to import"); } List<KnownApplicationImpl> imported; // STDIN also supported! 
if ("-".equals(jsonFile)) { imported = readFromStdin(); } else { imported = readFromFile(jsonFile); } TopicBasedRepository<KnownApplicationImpl> repo = kafkaClusters.getGlobalRepository("known-applications", KnownApplicationImpl.class); // give repo 10 seconds to get all data from Kafka (if any) log.info("Waiting for existing known applications to be retrieved from Kafka..."); try { Thread.sleep(Duration.ofSeconds(10).toMillis()); } catch (InterruptedException e) { return; } int cntImported = 0; for (KnownApplicationImpl application : imported) { boolean shouldImport = repo.getObject(application.getId()).map(app -> !isEqualTo(application, app)) .orElse(true); if (shouldImport) { repo.save(application).get(); cntImported++; } } Set<String> importedIds = imported.stream().map(app -> app.getId()).collect(Collectors.toSet()); int cntDeleted = 0; if (remove) { for (KnownApplicationImpl app : repo.getObjects()) { if (!importedIds.contains(app.getId())) { repo.delete(app).get(); cntDeleted++; } } } System.out.println(); System.out.println("========================= Known applications IMPORTED ========================"); System.out.println(); System.out.println(cntImported + " new application(s) imported."); if (remove) { System.out.println(cntDeleted + " application(s) removed as they did not exist in JSON data."); } System.out.println(); System.out.println("=============================================================================="); } private List<KnownApplicationImpl> readFromStdin() throws IOException { return readFromJsonString(StreamUtils.copyToString(System.in, Charset.defaultCharset())); } private List<KnownApplicationImpl> readFromFile(String file) throws IOException { File f = new File(file); try (FileInputStream fis = new FileInputStream(f)) { return readFromJsonString(StreamUtils.copyToString(fis, StandardCharsets.UTF_8)); } } private List<KnownApplicationImpl> readFromJsonString(String data) throws IOException { ObjectMapper mapper = 
JsonUtil.newObjectMapper(); JavaType tp = TypeFactory.defaultInstance().constructArrayType(KnownApplicationImpl.class); KnownApplicationImpl[] values = mapper.readValue(data, tp); return Arrays.asList(values); } private boolean isEqualTo(KnownApplicationImpl imported, KnownApplicationImpl existing) { return imported.getName().equals(existing.getName()) && imported.getId().equals(existing.getId()) && Objects.equals(imported.getInfoUrl(), existing.getInfoUrl()) && Objects.equals(imported.getAliases(), existing.getAliases()) && businessCapabilityIsEqual(imported.getBusinessCapabilities(), existing.getBusinessCapabilities()); } private boolean businessCapabilityIsEqual(List<BusinessCapability> imported, List<BusinessCapability> existing) { if (imported == null && existing == null) { return true; } if (imported == null || existing == null) { return false; } if (imported.size() != existing.size()) { return false; } for (int i = 0; i < imported.size(); i++) { if (!(imported.get(i).getId().equals(existing.get(i).getId())) || !(imported.get(i).getName().equals(existing.get(i).getName()))) { return false; } } return true; } }
7,118
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CreateBackupJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/CreateBackupJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.hermesworld.ais.galapagos.adminjobs.AdminJob; import com.hermesworld.ais.galapagos.kafka.KafkaCluster; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository; import com.hermesworld.ais.galapagos.util.HasKey; import com.hermesworld.ais.galapagos.util.JsonUtil; import org.json.JSONException; import org.json.JSONObject; import org.springframework.boot.ApplicationArguments; import org.springframework.stereotype.Component; import java.io.*; import java.util.Collection; import java.util.Optional; @Component public class CreateBackupJob implements AdminJob { private final KafkaClusters kafkaClusters; private final ObjectMapper objectMapper = JsonUtil.newObjectMapper(); public CreateBackupJob(KafkaClusters kafkaClusters) { this.kafkaClusters = kafkaClusters; } @Override public String getJobName() { return "create-backup"; } @Override public void run(ApplicationArguments allArguments) throws Exception { boolean createBackupFile = Optional.ofNullable(allArguments.getOptionValues("create.backup.file")) .flatMap(ls -> ls.stream().findFirst()).map(Boolean::parseBoolean).orElse(false); String outputFileName = Optional.ofNullable(allArguments.getOptionValues("output.filename")) .flatMap(ls -> ls.stream().findFirst()).map(String::new).orElse("backup.json"); JSONObject backup = new JSONObject(); System.out.println(); System.out.println("========================= Starting Backup Creation ========================"); System.out.println(); kafkaClusters.getEnvironmentIds().forEach(envId -> kafkaClusters.getEnvironment(envId) .ifPresent(env -> backup.put(envId, backupEnvironment(env)))); System.out.println(); System.out.println("========================= Backup Creation COMPLETE ========================"); System.out.println(); if 
(!createBackupFile) { System.out.println("Backup JSON:"); System.out.println(); System.out.println(backup.toString(2)); } if (createBackupFile) { System.out.println("========================= Generating Backup file as json ========================"); File file = new File(outputFileName); try (Writer writer = new BufferedWriter(new FileWriter(file))) { writer.write(backup.toString(2)); } catch (IOException e) { System.err.println("Could not create Backup file"); e.printStackTrace(); return; } System.out.println("========================= Generated Backup file as json in " + outputFileName + " ========================"); } } private JSONObject backupEnvironment(KafkaCluster cluster) { JSONObject result = new JSONObject(); for (TopicBasedRepository<?> backupTopic : cluster.getRepositories()) { result.put(backupTopic.getTopicName(), backupTopicData(cluster.getRepository(backupTopic.getTopicName(), backupTopic.getValueClass()))); } return result; } private JSONObject backupTopicData(TopicBasedRepository<? extends HasKey> repo) { JSONObject result = new JSONObject(); Collection<? extends HasKey> o = repo.getObjects(); for (HasKey obj : o) { try { result.put(obj.key(), new JSONObject(objectMapper.writeValueAsString(obj))); } catch (JSONException | JsonProcessingException e) { e.printStackTrace(); } } return result; } }
3,965
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SingleClusterAdminJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/SingleClusterAdminJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import com.hermesworld.ais.galapagos.adminjobs.AdminJob; import com.hermesworld.ais.galapagos.kafka.KafkaCluster; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import org.springframework.boot.ApplicationArguments; import org.springframework.util.StringUtils; import java.util.Optional; /** * Abstract base class for admin jobs operating on a single Kafka cluster. It deals with extracting the Kafka cluster * information from the admin job parameter <code>kafka.environment</code>, and handles invalid or missing parameter * values. Subclasses will receive the parsed and looked up Kafka cluster object, but can parse other command line * arguments theirselves, if needed. */ public abstract class SingleClusterAdminJob implements AdminJob { protected final KafkaClusters kafkaClusters; protected SingleClusterAdminJob(KafkaClusters kafkaClusters) { this.kafkaClusters = kafkaClusters; } @Override public final void run(ApplicationArguments allArguments) throws Exception { String kafkaEnvironment = Optional.ofNullable(allArguments.getOptionValues("kafka.environment")) .flatMap(ls -> ls.stream().findFirst()).orElse(null); if (!StringUtils.hasLength(kafkaEnvironment)) { throw new IllegalArgumentException( "Please provide --kafka.environment=<id> to specify Kafka Environment to update application ACLs on."); } KafkaCluster cluster = kafkaClusters.getEnvironment(kafkaEnvironment).orElse(null); if (cluster == null) { throw new IllegalArgumentException("No Kafka Environment with ID " + kafkaEnvironment + " found"); } runOnCluster(cluster, allArguments); } protected abstract void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception; }
1,909
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CleanupDeveloperAuthenticationsJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/CleanupDeveloperAuthenticationsJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import com.hermesworld.ais.galapagos.adminjobs.AdminJob; import com.hermesworld.ais.galapagos.devauth.DeveloperAuthenticationService; import org.springframework.boot.ApplicationArguments; import org.springframework.stereotype.Component; @Component public class CleanupDeveloperAuthenticationsJob implements AdminJob { private final DeveloperAuthenticationService developerAuthenticationService; public CleanupDeveloperAuthenticationsJob(DeveloperAuthenticationService developerAuthenticationService) { this.developerAuthenticationService = developerAuthenticationService; } @Override public String getJobName() { return "cleanup-developer-authentications"; } @Override public void run(ApplicationArguments allArguments) throws Exception { System.out.println(); System.out.println( "=========== Starting Cleanup of expired Developer Authentications on all Kafka clusters ==========="); System.out.println(); System.out.println("=========== Cleanup of total " + developerAuthenticationService.clearExpiredDeveloperAuthenticationsOnAllClusters().get() + " expired Developer Certificates on all Kafka clusters was successful ==========="); } }
1,334
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ImportBackupJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/ImportBackupJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.adminjobs.AdminJob;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.json.JSONObject;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;
import org.springframework.util.StreamUtils;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.Optional;
import java.util.concurrent.ExecutionException;

/**
 * Admin job for importing a backup into Galapagos.<br>
 * The job has two parameters:
 * <ul>
 * <li><code>--import.file=<i>&lt;json-file></i> - The name of a file from which the Objects are read and imported into
 * Galapagos.</li>
 * <li><code>--clearRepos=<i>&lt;boolean></i> - A boolean value, which indicates whether all Repositories should be
 * emptied before performing the import. Set it to true, if Repositories should be emptied, otherwise to false, so the
 * old Objects in the Repositories will still be there and the new ones are added additionally. <br>
 * Note that only repositories on environments which are present in the file to import are cleared.</li>
 * </ul>
 *
 * @author PolatEmr
 *
 */
@Component
public class ImportBackupJob implements AdminJob {

    private final KafkaClusters kafkaClusters;

    private final ObjectMapper objectMapper;

    public ImportBackupJob(KafkaClusters kafkaClusters) {
        this.kafkaClusters = kafkaClusters;
        this.objectMapper = JsonUtil.newObjectMapper();
    }

    @Override
    public String getJobName() {
        return "import-backup";
    }

    /**
     * Reads the backup JSON (structure: environment ID -> topic name -> object key -> object) and imports it into the
     * matching repositories. Environments in the file which are not configured here are silently skipped.
     *
     * @param allArguments command line arguments; both <code>import.file</code> and <code>clearRepos</code> are
     *                     required
     * @throws IllegalArgumentException if one of the two required parameters is missing
     */
    @Override
    public void run(ApplicationArguments allArguments) throws Exception {
        String jsonFile = Optional.ofNullable(allArguments.getOptionValues("import.file"))
                .flatMap(ls -> ls.stream().findFirst()).orElse(null);
        // Boxed Boolean on purpose: null signals "parameter missing", which is an error below.
        Boolean emptyRepos = Optional.ofNullable(allArguments.getOptionValues("clearRepos"))
                .flatMap(ls -> ls.stream().findFirst().map(Boolean::parseBoolean)).orElse(null);

        if (jsonFile == null) {
            throw new IllegalArgumentException("Please provide a file using --import.file option");
        }

        if (emptyRepos == null) {
            throw new IllegalArgumentException(
                    "Please provide if existing repos should be cleared before importing backup using --clearRepos option");
        }

        File f = new File(jsonFile);
        JSONObject data;
        try (FileInputStream fis = new FileInputStream(f)) {
            data = new JSONObject(StreamUtils.copyToString(fis, StandardCharsets.UTF_8));
        }

        System.out.println();
        System.out.println("========================= Starting Backup Import ========================");
        System.out.println();

        // Top-level keys of the backup JSON are environment IDs.
        Iterator<String> envIds = data.keys();

        while (envIds.hasNext()) {
            String envId = envIds.next();
            KafkaCluster env = kafkaClusters.getEnvironment(envId).orElse(null);
            if (env == null) {
                // Environment from the backup is not configured on this instance - skip it.
                continue;
            }
            if (emptyRepos) {
                // Clearing happens per environment, and only for environments present in the backup file.
                System.out.println();
                System.out.println("Clearing Repositories on Environment " + envId + "...");
                System.out.println();
                emptyRepos(env);
            }
            System.out.println("Importing environment " + envId + "...");
            importBackup(env, data.getJSONObject(envId));
        }

        System.out.println();
        System.out.println("========================= Backup Import COMPLETE ========================");
        System.out.println();
    }

    /**
     * Imports the backup data of one environment. Second-level JSON keys are repository (topic) names; third-level
     * keys are object keys. Each object is deserialized to the repository's value class and saved synchronously.
     * Repositories named in the backup but unknown on this instance are skipped with a STDERR note.
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    private void importBackup(KafkaCluster env, JSONObject data)
            throws IOException, ExecutionException, InterruptedException {
        Iterator<String> topics = data.keys();

        while (topics.hasNext()) {
            String topic = topics.next();
            System.out.println("Currently importing: " + topic);
            // Raw type required here: the concrete value class is only known at runtime via the repository.
            TopicBasedRepository repo = env.getRepositories().stream().filter(r -> topic.equals(r.getTopicName()))
                    .findFirst().orElse(null);
            if (repo == null) {
                System.err.println("Skipping nonexisting repo " + topic + "...");
                continue;
            }

            Class<?> repoClass = repo.getValueClass();
            JSONObject contents = data.getJSONObject(topic);
            Iterator<String> keys = contents.keys();
            while (keys.hasNext()) {
                String key = keys.next();
                JSONObject content = contents.getJSONObject(key);
                HasKey o = (HasKey) objectMapper.readValue(content.toString(), repoClass);
                // .get() forces each save to complete before the next one - keeps the import strictly sequential.
                repo.save(o).get();
            }
        }
    }

    /**
     * Deletes every object from every repository of the given cluster. On interrupt the method returns early
     * (remaining repositories stay untouched); other failures are rethrown as RuntimeException.
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    private void emptyRepos(KafkaCluster cluster) {
        for (TopicBasedRepository topicBasedRepository : cluster.getRepositories()) {
            for (Object object : topicBasedRepository.getObjects()) {
                try {
                    topicBasedRepository.delete((HasKey) object).get();
                }
                catch (InterruptedException e) {
                    return;
                }
                catch (ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }
}
5,805
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
MarkTopicApprovalRequiredJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/MarkTopicApprovalRequiredJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import java.util.ArrayList; import java.util.List; import java.util.Optional; import com.hermesworld.ais.galapagos.adminjobs.AdminJob; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import com.hermesworld.ais.galapagos.topics.service.TopicService; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.ApplicationArguments; import org.springframework.stereotype.Component; @Component public class MarkTopicApprovalRequiredJob implements AdminJob { private final KafkaClusters kafkaClusters; private final TopicService topicService; public MarkTopicApprovalRequiredJob(KafkaClusters kafkaClusters, @Qualifier("nonvalidating") TopicService topicService) { this.kafkaClusters = kafkaClusters; this.topicService = topicService; } @Override public String getJobName() { return "mark-topic-approval-required"; } @Override public void run(ApplicationArguments allArguments) throws Exception { List<String> topicNames = Optional.ofNullable(allArguments.getOptionValues("topic.name")).orElse(List.of()); if (topicNames.isEmpty()) { throw new IllegalArgumentException("Please provide at least one --topic.name=<topicname> parameter"); } // give Kafka some time to fill service metadata repository System.out.println("Waiting for metadata to be loaded..."); Thread.sleep(10000); List<String> resultLines = new ArrayList<>(); for (String topicName : topicNames) { for (String environmentId : kafkaClusters.getEnvironmentIds()) { if (topicService.getTopic(environmentId, topicName).isPresent()) { topicService.setSubscriptionApprovalRequiredFlag(environmentId, topicName, true).get(); resultLines.add("Topic " + topicName + " reconfigured on environment " + environmentId); } } } if (resultLines.isEmpty()) { throw new IllegalStateException("Could not find any of the specified topics on any environment"); } System.out.println(); System.out.println("============================ Topic(s) reconfigured 
==========================="); System.out.println(); resultLines.forEach(System.out::println); System.out.println(); System.out.println("=============================================================================="); } }
2,564
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ResetApplicationPrefixesJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/ResetApplicationPrefixesJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl; import com.hermesworld.ais.galapagos.applications.ApplicationsService; import com.hermesworld.ais.galapagos.kafka.KafkaCluster; import com.hermesworld.ais.galapagos.kafka.KafkaClusters; import com.hermesworld.ais.galapagos.kafka.util.AclSupport; import org.springframework.boot.ApplicationArguments; import org.springframework.stereotype.Component; import java.util.concurrent.ExecutionException; /** * This admin job "resets" the Prefixes and ACLs in the given Kafka Cluster for the given application. <br> * <h2>When to use</h2> * <p> * This admin job can e.g. be used when the Aliases of an application have changed so drastically that you want to * remove the prefixes and ACLs associated with the previous aliases from Galapagos and Kafka. By default, Galapagos * just <b>adds</b> prefix rights, but never removes them. * </p> * <h2>Risks</h2> * <p> * The application will <b>immediately lose</b> access to the prefixes associated with previous, but no longer valid * application aliases. If the running instance of the application currently still uses topics or consumer groups with * such a prefix, you should write and run migration code before performing this reset. If in doubt, <b>do not run</b> * this admin job. * </p> * <h2>How to use</h2> * <p> * Add <code>--galapagos.jobs.reset-application-prefixes</code> as startup parameter to Galapagos. <br> * The job has two required parameters: * <ul> * <li><code>--kafka.environment=<i>&lt;id></i> - The ID of the Kafka Environment to reset application prefixes and * associated ACLs on, as configured for Galapagos.</li> * <li><code>--application.id</code> - ID of the application to reset, as stored in Galapagos. 
If you do not know how to * retrieve this ID, you most likely should better not execute this admin job (see risks above).</li> * </ul> * </p> */ @Component public class ResetApplicationPrefixesJob extends SingleClusterAdminJob { private final ApplicationsService applicationsService; private final AclSupport aclSupport; public ResetApplicationPrefixesJob(KafkaClusters kafkaClusters, ApplicationsService applicationsService, AclSupport aclSupport) { super(kafkaClusters); this.applicationsService = applicationsService; this.aclSupport = aclSupport; } @Override public String getJobName() { return "reset-application-prefixes"; } @Override protected void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception { String applicationId = allArguments.getOptionValues("application.id").stream().findFirst() .orElseThrow(() -> new IllegalArgumentException("Please provide required parameter --application.id")); try { System.out.println("===== Resetting Prefixes and ACLs for Application " + applicationId + " ====="); applicationsService.resetApplicationPrefixes(cluster.getId(), applicationId) .thenCompose(metadata -> cluster.updateUserAcls(new ToolingUser(metadata, cluster.getId(), kafkaClusters.getAuthenticationModule(cluster.getId()).orElseThrow(), aclSupport))) .get(); System.out.println("===== Prefixes and ACL Reset SUCCESSFUL ====="); } catch (ExecutionException e) { if (e.getCause() instanceof Exception) { throw (Exception) e.getCause(); } throw e; } } }
3,558
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
UpdateApplicationAclsJob.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/adminjobs/impl/UpdateApplicationAclsJob.java
package com.hermesworld.ais.galapagos.adminjobs.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.impl.ConnectedKafkaCluster;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.json.JSONException;
import org.springframework.boot.ApplicationArguments;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;

import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * This admin job "refreshes" the ACLs in the given Kafka Cluster so all required ACLs for all applications - according
 * to Galapagos Metadata - are present. It also <b>removes</b> superfluous ACLs from Kafka set for the applications
 * known to Galapagos (ACLs not belonging to one of the applications registered in Galapagos are not changed). <br>
 * This admin job is particularly useful if new rights have been added to Galapagos logic (e.g. for Transactional IDs)
 * or if, for some reason, the ACLs in Kafka have been modified / corrupted. <br>
 * <br>
 * The job has one required and one optional parameter:
 * <ul>
 * <li><code>--kafka.environment=<i>&lt;id></i> - The ID of the Kafka Environment to restore the application ACLs on, as
 * configured for Galapagos.</li>
 * <li><code>--dry.run</code> - If present, no ACLs are created or deleted, but these actions are printed to STDOUT
 * instead.</li>
 * </ul>
 */
@Component
@Slf4j
public class UpdateApplicationAclsJob extends SingleClusterAdminJob {

    private final AclSupport aclSupport;

    private final ApplicationsService applicationsService;

    public UpdateApplicationAclsJob(KafkaClusters kafkaClusters, AclSupport aclSupport,
            ApplicationsService applicationsService) {
        super(kafkaClusters);
        this.aclSupport = aclSupport;
        this.applicationsService = applicationsService;
    }

    @Override
    public String getJobName() {
        return "update-application-acls";
    }

    /**
     * Entry point of the job: waits briefly for Kafka connections, then delegates to
     * {@link #performUpdate(KafkaCluster, Function, boolean)} using the application metadata stored for the cluster.
     *
     * @param cluster      Kafka cluster to update the ACLs on.
     * @param allArguments All command line arguments; only <code>--dry.run</code> is evaluated here.
     */
    @Override
    public void runOnCluster(KafkaCluster cluster, ApplicationArguments allArguments) throws Exception {
        boolean dryRun = allArguments.containsOption("dry.run");

        System.out.println("Waiting additional 10 seconds to wait for additional Kafka connection initializations...");
        // Fixed grace period before touching the cluster, as announced by the message above.
        Thread.sleep(10000);

        performUpdate(cluster, id -> applicationsService.getApplicationMetadata(cluster.getId(), id), dryRun);

        System.out.println();
        System.out.println("==================== Update of Application ACLs COMPLETE ====================");
        System.out.println();
    }

    /**
     * Updates (or, in dry-run mode, only reports) the ACLs of every known application that has metadata on the given
     * cluster. Package-visible to allow tests to inject an alternative metadata source.
     *
     * @param cluster        Kafka cluster to update the ACLs on.
     * @param metadataSource Function returning the {@link ApplicationMetadata} for an application ID, if any.
     * @param dryRun         If <code>true</code>, intended ACL creations / deletions are collected and printed instead
     *                       of being applied.
     */
    void performUpdate(KafkaCluster cluster, Function<String, Optional<ApplicationMetadata>> metadataSource,
            boolean dryRun) throws Exception {
        Map<String, KnownApplication> applications = applicationsService.getKnownApplications(false).stream()
                .collect(Collectors.toMap(KnownApplication::getId, Function.identity()));

        // Collectors for dry-run mode; filled by the wrapped AdminClient below.
        List<AclBinding> dryRunCreatedAcls = new ArrayList<>();
        List<AclBindingFilter> dryRunDeletedAcls = new ArrayList<>();

        if (dryRun) {
            // Replace the cluster's AdminClient with a wrapper that records createAcls / deleteAcls requests
            // instead of executing them. The delegated no-op calls (empty collections) still return valid futures.
            ((ConnectedKafkaCluster) cluster).wrapAdminClient(client -> new NoUpdatesAdminClient(client) {
                @Override
                public KafkaFuture<Void> createAcls(Collection<AclBinding> acls) {
                    dryRunCreatedAcls.addAll(acls);
                    return client.createAcls(List.of());
                }

                @Override
                public KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters) {
                    dryRunDeletedAcls.addAll(filters);
                    return client.deleteAcls(List.of());
                }
            });
        }

        for (String id : applications.keySet()) {
            Optional<ApplicationMetadata> opMeta = metadataSource.apply(id);
            // Applications without metadata on this cluster (never registered here) are skipped silently.
            if (opMeta.isPresent()) {
                if (!dryRun) {
                    System.out.println("Updating ACLs for application " + applications.get(id).getName());
                }
                else {
                    System.out.println("Following ACLs are required for " + applications.get(id).getName());
                    try {
                        System.out.println(new ToolingUser(opMeta.get(), cluster.getId(),
                                kafkaClusters.getAuthenticationModule(cluster.getId()).orElseThrow(), aclSupport)
                                        .getRequiredAclBindings());
                    }
                    catch (Exception e) {
                        // NOTE(review): best-effort reporting; a failing application is printed and skipped so the
                        // remaining applications are still processed.
                        e.printStackTrace();
                        continue;
                    }
                }
                // In dry-run mode this only feeds the recording AdminClient wrapper installed above.
                updateApplicationAcl(cluster, opMeta.get());
            }
        }

        if (dryRun) {
            System.out.println("Would CREATE the following ACLs:");
            dryRunCreatedAcls.forEach(System.out::println);
            System.out.println();
            System.out.println("Would DELETE the following ACLs:");
            dryRunDeletedAcls.forEach(System.out::println);
        }
    }

    /**
     * Synchronizes the ACLs for a single application (blocking). Applications without a Kafka user name are skipped.
     */
    private void updateApplicationAcl(KafkaCluster cluster, ApplicationMetadata metadata)
            throws ExecutionException, InterruptedException {
        KafkaUser user = new ToolingUser(metadata, cluster.getId(),
                kafkaClusters.getAuthenticationModule(cluster.getId()).orElseThrow(), aclSupport);
        try {
            if (StringUtils.hasLength(user.getKafkaUserName())) {
                cluster.updateUserAcls(user).get();
            }
        }
        catch (JSONException e) {
            // Corrupt metadata for one application must not abort the whole job; log and continue.
            log.error("Could not update ACLs for application {}", metadata.getApplicationId(), e);
        }
    }

}
6,305
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
Change.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/Change.java
package com.hermesworld.ais.galapagos.changes;

/**
 * Describes a single change, from Galapagos point of view, on a Kafka cluster. <br>
 * A change can either be used in a Change Log, to describe the sequence of changes applied to a cluster, or for
 * <i>Staging</i>, where the list of <i>required</i> changes to be applied on a target environment are calculated.
 *
 * @author AlbrechtFlo
 *
 */
public interface Change {

    /**
     * Returns the type of this change.
     * <p>
     * NOTE(review): The "never null" guarantee below is not upheld by the UNKNOWN_CHANGE placeholder in
     * ChangeDeserizalizer, which returns <code>null</code> here for unreadable changelog entries.
     *
     * @return The type of this change, never <code>null</code>.
     */
    ChangeType getChangeType();

}
588
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangeData.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/ChangeData.java
package com.hermesworld.ais.galapagos.changes;

import java.time.ZonedDateTime;

import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonFormat.Shape;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.util.HasKey;

import lombok.Getter;
import lombok.Setter;

/**
 * A single changelog entry: one {@link Change}, together with who triggered it and when. Entries are naturally ordered
 * by their timestamp (ascending), so sorting a list of entries yields chronological order.
 */
@JsonSerialize
@Getter
@Setter
public class ChangeData implements Comparable<ChangeData>, HasKey {

    // Unique ID of this entry; doubles as the repository key (see key()).
    private String id;

    // Serialized as an ISO date-time string instead of a numeric timestamp.
    @JsonFormat(shape = Shape.STRING)
    private ZonedDateTime timestamp;

    // Technical name of the user (or "_SYSTEM") who caused the change.
    private String principal;

    // Human-readable name of the principal, for display purposes.
    private String principalFullName;

    // The actual change that occurred.
    private Change change;

    @Override
    public String key() {
        return id;
    }

    /**
     * Orders entries by timestamp, oldest first. Null-safe: entries without a timestamp sort before all others.
     * <p>
     * NOTE(review): deviates from the {@link Comparable} contract — a <code>null</code> argument yields -1 instead of
     * throwing NullPointerException.
     */
    @Override
    public int compareTo(ChangeData o) {
        if (o == null) {
            return -1;
        }
        if (timestamp == null) {
            return o.timestamp == null ? 0 : -1;
        }
        if (o.timestamp == null) {
            return 1;
        }
        return timestamp.compareTo(o.timestamp);
    }

}
1,044
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangeType.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/ChangeType.java
package com.hermesworld.ais.galapagos.changes;

/**
 * The type of a {@link Change}, as recorded in the changelog or calculated during Staging. Each constant corresponds to
 * one concrete change implementation (see ChangeBase factory methods).
 */
public enum ChangeType {

    TOPIC_CREATED, TOPIC_SUBSCRIBED, TOPIC_UNSUBSCRIBED, TOPIC_SUBSCRIPTION_UPDATED, TOPIC_DELETED,
    TOPIC_DESCRIPTION_CHANGED, TOPIC_DEPRECATED, TOPIC_UNDEPRECATED, TOPIC_SUBSCRIPTION_APPROVAL_REQUIRED_FLAG_UPDATED,
    TOPIC_SCHEMA_VERSION_PUBLISHED, TOPIC_SCHEMA_VERSION_DELETED, TOPIC_PRODUCER_APPLICATION_ADDED,
    TOPIC_PRODUCER_APPLICATION_REMOVED, TOPIC_OWNER_CHANGED,

    /**
     * A change which consists of multiple changes. This is only used during Staging and will not be serialized to the
     * changelog.
     */
    COMPOUND_CHANGE,

    /**
     * @deprecated No longer used as of Galapagos 0.3.0.
     */
    @Deprecated
    APPLICATION_REGISTERED,

    /**
     * @deprecated As users can change configurations of topics via command-line as well, this change type is a little
     *             bit confusing. Beginning with Galapagos 0.2.0, Topic configuration is treated as something outside
     *             the scope of standard Galapagos model, and extra screens allow for direct editing of the
     *             configuration stored in Kafka (as users could also do using Kafka command line tools).
     */
    @Deprecated
    TOPIC_CONFIG_UPDATED

}
1,260
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangesService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/ChangesService.java
package com.hermesworld.ais.galapagos.changes;

import java.util.List;

/**
 * Service providing read access to the changelog recorded for each Kafka environment.
 */
public interface ChangesService {

    /**
     * Returns the changelog recorded for the given Kafka environment.
     *
     * @param environmentId ID of the Kafka environment, as configured for Galapagos.
     *
     * @return The recorded changes, or an empty list for an unknown environment ID; never <code>null</code>.
     */
    // Interface members are implicitly public; redundant modifier removed.
    List<ChangeData> getChangeLog(String environmentId);

}
174
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplyChangeContext.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/ApplyChangeContext.java
package com.hermesworld.ais.galapagos.changes;

import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.service.TopicService;

/**
 * Context handed to an {@link ApplicableChange} when it is applied, providing the target environment and the services
 * needed to perform the change there.
 */
// Interface members are implicitly public; redundant modifiers removed.
public interface ApplyChangeContext {

    /**
     * @return ID of the Kafka environment the change shall be applied to.
     */
    String getTargetEnvironmentId();

    /**
     * @return Service for topic-related operations on the target environment.
     */
    TopicService getTopicService();

    /**
     * @return Service for subscription-related operations on the target environment.
     */
    SubscriptionService getSubscriptionService();

}
383
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicableChange.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/ApplicableChange.java
package com.hermesworld.ais.galapagos.changes;

import java.util.concurrent.CompletableFuture;

/**
 * A {@link Change} which can be applied to a target environment, e.g. during Staging.
 */
public interface ApplicableChange extends Change {

    /**
     * Applies this change to the target environment described by the given context.
     *
     * @param context Context providing the target environment ID and the services to use.
     *
     * @return A future which completes when the change has been applied.
     */
    CompletableFuture<?> applyTo(ApplyChangeContext context);

}
213
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ProfilePicture.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/config/ProfilePicture.java
package com.hermesworld.ais.galapagos.changes.config;

/**
 * Source of the profile picture shown for changelog principals; configured via the
 * <code>galapagos.changelog</code> properties (see {@link GalapagosChangesConfig}).
 */
public enum ProfilePicture {
    // GRAVATAR: picture fetched from the Gravatar service — presumably by principal e-mail; TODO confirm
    // INITIALS: generated picture showing the principal's initials
    // CUSTOM: image from the configured customImageUrl (see GalapagosChangesConfig)
    // NONE: no profile picture at all
    GRAVATAR, INITIALS, CUSTOM, NONE
}
123
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GalapagosChangesConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/config/GalapagosChangesConfig.java
package com.hermesworld.ais.galapagos.changes.config;

import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

/**
 * Configuration properties for the Galapagos changelog, bound from the <code>galapagos.changelog</code> prefix.
 */
@Configuration
@ConfigurationProperties("galapagos.changelog")
@Getter
@Setter
public class GalapagosChangesConfig {

    // Number of changelog entries — presumably the number to display; TODO confirm against consumers
    private int entries;

    // Minimum number of days — presumably the minimum timespan of entries to keep/show; TODO confirm
    private int minDays;

    // Source of the principal's profile picture (see ProfilePicture enum).
    private ProfilePicture profilePicture;

    // Fallback picture source — presumably used when the primary source yields no image; TODO confirm
    private ProfilePicture defaultPicture;

    // URL of a custom profile image; presumably only relevant when picture mode is CUSTOM.
    private String customImageUrl;

}
527
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangesController.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/controller/ChangesController.java
package com.hermesworld.ais.galapagos.changes.controller;

import java.util.ArrayList;
import java.util.List;

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import com.hermesworld.ais.galapagos.changes.ChangeData;
import com.hermesworld.ais.galapagos.changes.ChangesService;

/**
 * REST endpoint exposing the changelog of a Kafka environment, newest entries first.
 */
@RestController
public class ChangesController {

    private final ChangesService changesService;

    public ChangesController(ChangesService changesService) {
        this.changesService = changesService;
    }

    /**
     * Returns the most recent changelog entries of the given environment, newest first.
     *
     * @param environmentId ID of the Kafka environment.
     * @param limit         Maximum number of entries to return (default 10).
     *
     * @return Up to <code>limit</code> entries, newest first; empty list for unknown environments.
     */
    @GetMapping(value = "/api/environments/{environmentId}/changelog")
    public List<ChangeData> getChangeLog(@PathVariable String environmentId,
            @RequestParam(required = false, defaultValue = "10") int limit) {
        // TODO should throw a 404 if invalid environment ID; currently returning empty list then
        return toChangeLog(changesService.getChangeLog(environmentId), limit);
    }

    /**
     * Returns the last (up to) <code>limit</code> entries of the given list, in reverse order.
     */
    private List<ChangeData> toChangeLog(List<ChangeData> changes, int limit) {
        // Guard against hostile query parameters: a negative limit previously made new ArrayList<>(limit)
        // throw IllegalArgumentException (HTTP 500), and a huge limit pre-allocated accordingly.
        int effectiveLimit = Math.max(0, limit);
        List<ChangeData> result = new ArrayList<>(Math.min(effectiveLimit, changes.size()));
        for (int i = changes.size() - 1; i >= 0 && result.size() < effectiveLimit; i--) {
            result.add(changes.get(i));
        }
        return result;
    }

}
1,395
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangeDeserizalizer.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/impl/ChangeDeserizalizer.java
package com.hermesworld.ais.galapagos.changes.impl;

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.changes.ChangeType;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.extern.slf4j.Slf4j;

import java.io.IOException;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;

/**
 * Jackson deserializer which reconstructs {@link Change} objects from their changelog JSON representation. Unreadable
 * or unknown entries are mapped to an UNKNOWN_CHANGE placeholder instead of failing, so a single corrupt entry cannot
 * break reading the whole changelog ("fail gently").
 */
@Slf4j
public class ChangeDeserizalizer extends StdDeserializer<Change> {

    private static final long serialVersionUID = -2708647785538261028L;

    private static final String TOPIC_NAME = "topicName";

    // Placeholder for entries that could not be read. NOTE(review): returns null from getChangeType(), which
    // violates the "never null" contract documented on Change — callers must be prepared for this.
    private static final Change UNKNOWN_CHANGE = new Change() {
        @Override
        public ChangeType getChangeType() {
            return null;
        }
    };

    public ChangeDeserizalizer() {
        super(Change.class);
    }

    @Override
    public Change deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
        ObjectMapper mapper = (ObjectMapper) p.getCodec();
        JsonNode tree = mapper.readTree(p);
        return deserializeChange(tree, mapper);
    }

    /**
     * Dispatches on the "changeType" field and rebuilds the matching change via the ChangeBase factory methods.
     * Recurses for COMPOUND_CHANGE entries. Returns UNKNOWN_CHANGE for unreadable or unsupported entries.
     */
    @SuppressWarnings("deprecation")
    private Change deserializeChange(JsonNode tree, ObjectMapper mapper) {
        String ctValue = tree.findValue("changeType").asText();
        ChangeType changeType;
        try {
            changeType = ChangeType.valueOf(ctValue);
        }
        catch (IllegalArgumentException e) {
            log.error("Invalid change type found in Changelog data: " + ctValue);
            // fail gently
            return UNKNOWN_CHANGE;
        }

        try {
            switch (changeType) {
            case TOPIC_CREATED:
                JsonNode params = tree.findValue("createParams");
                if (params == null) {
                    // Entry written by Galapagos 0.1.0, which did not yet record the create params.
                    return new V010CreateTopicChange(
                            mapper.treeToValue(tree.findValue("topicMetadata"), TopicMetadata.class));
                }
                return ChangeBase.createTopic(mapper.treeToValue(tree.findValue("topicMetadata"), TopicMetadata.class),
                        mapper.treeToValue(tree.findValue("createParams"), TopicCreateParamsDto.class)
                                .toTopicCreateParams());
            case TOPIC_DELETED:
                return ChangeBase.deleteTopic(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("internalTopic").asBoolean());
            case TOPIC_SUBSCRIBED:
                return ChangeBase.subscribeTopic(
                        mapper.treeToValue(tree.findValue("subscriptionMetadata"), SubscriptionMetadata.class));
            case TOPIC_UNSUBSCRIBED:
                return ChangeBase.unsubscribeTopic(
                        mapper.treeToValue(tree.findValue("subscriptionMetadata"), SubscriptionMetadata.class));
            case TOPIC_SUBSCRIPTION_UPDATED:
                return ChangeBase.updateSubscription(
                        mapper.treeToValue(tree.findValue("subscriptionMetadata"), SubscriptionMetadata.class));
            case TOPIC_DESCRIPTION_CHANGED:
                return ChangeBase.updateTopicDescription(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("newDescription").asText(), tree.findValue("internalTopic").asBoolean());
            case TOPIC_DEPRECATED:
                return ChangeBase.markTopicDeprecated(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("deprecationText").asText(),
                        mapper.treeToValue(tree.findValue("eolDate"), LocalDate.class));
            case TOPIC_UNDEPRECATED:
                return ChangeBase.unmarkTopicDeprecated(tree.findValue(TOPIC_NAME).asText());
            case TOPIC_PRODUCER_APPLICATION_ADDED:
                return ChangeBase.addTopicProducer(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("producerApplicationId").asText());
            case TOPIC_PRODUCER_APPLICATION_REMOVED:
                return ChangeBase.removeTopicProducer(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("producerApplicationId").asText());
            case TOPIC_OWNER_CHANGED:
                return ChangeBase.changeTopicOwner(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("previousOwnerApplicationId").asText());
            case TOPIC_SUBSCRIPTION_APPROVAL_REQUIRED_FLAG_UPDATED:
                return ChangeBase.updateTopicSubscriptionApprovalRequiredFlag(tree.findValue(TOPIC_NAME).asText(),
                        tree.findValue("subscriptionApprovalRequired").asBoolean());
            case TOPIC_CONFIG_UPDATED:
                // we intentionally throw away all additional change information, as it should not be used for anything.
                return new V010UpdateTopicConfig();
            case TOPIC_SCHEMA_VERSION_PUBLISHED:
                return ChangeBase.publishTopicSchemaVersion(tree.findValue(TOPIC_NAME).asText(),
                        mapper.treeToValue(tree.findValue("schemaMetadata"), SchemaMetadata.class));
            case TOPIC_SCHEMA_VERSION_DELETED:
                return ChangeBase.deleteTopicSchemaVersion(tree.findValue(TOPIC_NAME).asText());
            case APPLICATION_REGISTERED:
                return ChangeBase.registerApplication(tree.findValue("applicationId").asText(),
                        mapper.treeToValue(tree.findValue("applicationMetadata"), ApplicationMetadata.class));
            case COMPOUND_CHANGE:
                ChangeBase mainChange = (ChangeBase) deserializeChange(tree.findValue("mainChange"), mapper);
                JsonNode changeList = tree.findValue("additionalChanges");
                List<ChangeBase> additionalChanges = new ArrayList<>();
                if (changeList != null && changeList.isArray()) {
                    ArrayNode changeArray = (ArrayNode) changeList;
                    changeArray.forEach(node -> additionalChanges.add((ChangeBase) deserializeChange(node, mapper)));
                }
                return ChangeBase.compoundChange(mainChange, additionalChanges);
            }
        }
        catch (JsonProcessingException e) {
            log.error("Could not read change found in Changelog data", e);
            return UNKNOWN_CHANGE;
        }

        // Safety net for enum constants added later without a corresponding case above.
        // Fixed: the message previously referenced a nonexistent "KafkaChangeDeserializer" class.
        log.error("Unsupported Change type in Changelog data: " + changeType
                + ". ChangeDeserizalizer class must be updated.");
        return UNKNOWN_CHANGE;
    }

    /**
     * Holder for a CREATE_TOPIC change which was generated from Galapagos 0.1.0 versions and thus did not include the
     * createParams attribute. Can not be used to apply the change to an environment, but can be used for changelog etc.
     *
     * @author AlbrechtFlo
     *
     */
    @JsonSerialize
    static class V010CreateTopicChange implements Change {

        private final TopicMetadata topicMetadata;

        public V010CreateTopicChange(TopicMetadata topicMetadata) {
            this.topicMetadata = topicMetadata;
        }

        public TopicMetadata getTopicMetadata() {
            return topicMetadata;
        }

        @Override
        public ChangeType getChangeType() {
            return ChangeType.TOPIC_CREATED;
        }

    }

    /**
     * Holder for a legacy TOPIC_CONFIG_UPDATED change; all change details are intentionally discarded on read.
     */
    static class V010UpdateTopicConfig implements Change {

        @SuppressWarnings("deprecation")
        @Override
        public ChangeType getChangeType() {
            return ChangeType.TOPIC_CONFIG_UPDATED;
        }

    }

}
8,129
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangesServiceImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/impl/ChangesServiceImpl.java
package com.hermesworld.ais.galapagos.changes.impl;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.changes.ChangeData;
import com.hermesworld.ais.galapagos.changes.ChangesService;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.util.InitPerCluster;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.security.AuditPrincipal;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.time.ZonedDateTime;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

/**
 * Records topic and subscription events as {@link ChangeData} entries in a per-cluster "changelog" repository, and
 * serves the changelog back in chronological order. Each event handler converts its event into the matching
 * {@link ChangeBase} change and persists it via {@link #logChange}.
 */
@Component
@Slf4j
public class ChangesServiceImpl
        implements ChangesService, TopicEventsListener, SubscriptionEventsListener, InitPerCluster {

    private final KafkaClusters kafkaClusters;

    public ChangesServiceImpl(KafkaClusters kafkaClusters) {
        this.kafkaClusters = kafkaClusters;
    }

    @Override
    public void init(KafkaCluster cluster) {
        // Touch the repository once so its backing topic is loaded for this cluster.
        getRepository(cluster).getObjects();
    }

    // --- TopicEventsListener: one changelog entry per topic event ---

    @Override
    public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) {
        return logChange(ChangeBase.createTopic(event.getMetadata(), event.getTopicCreateParams()), event);
    }

    @Override
    public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) {
        // The "internal topic" flag is derived from the topic type; used for GUI filtering of the changelog.
        return logChange(ChangeBase.deleteTopic(event.getMetadata().getName(),
                event.getMetadata().getType() == TopicType.INTERNAL), event);
    }

    @Override
    public CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event) {
        return logChange(ChangeBase.updateTopicDescription(event.getMetadata().getName(),
                event.getMetadata().getDescription(), event.getMetadata().getType() == TopicType.INTERNAL), event);
    }

    @Override
    public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) {
        return logChange(ChangeBase.markTopicDeprecated(event.getMetadata().getName(),
                event.getMetadata().getDeprecationText(), event.getMetadata().getEolDate()), event);
    }

    @Override
    public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) {
        return logChange(ChangeBase.unmarkTopicDeprecated(event.getMetadata().getName()), event);
    }

    @Override
    public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) {
        return logChange(ChangeBase.updateTopicSubscriptionApprovalRequiredFlag(event.getMetadata().getName(),
                event.getMetadata().isSubscriptionApprovalRequired()), event);
    }

    @Override
    public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) {
        return logChange(ChangeBase.addTopicProducer(event.getMetadata().getName(), event.getProducerApplicationId()),
                event);
    }

    @Override
    public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) {
        return logChange(
                ChangeBase.removeTopicProducer(event.getMetadata().getName(), event.getProducerApplicationId()),
                event);
    }

    @Override
    public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) {
        return logChange(
                ChangeBase.changeTopicOwner(event.getMetadata().getName(), event.getPreviousOwnerApplicationId()),
                event);
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) {
        return logChange(ChangeBase.publishTopicSchemaVersion(event.getMetadata().getName(), event.getNewSchema()),
                event);
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) {
        return logChange(ChangeBase.deleteTopicSchemaVersion(event.getMetadata().getName()), event);
    }

    // --- SubscriptionEventsListener ---

    @Override
    public CompletableFuture<Void> handleSubscriptionCreated(SubscriptionEvent event) {
        return logChange(ChangeBase.subscribeTopic(event.getMetadata()), event);
    }

    @Override
    public CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionEvent event) {
        return logChange(ChangeBase.unsubscribeTopic(event.getMetadata()), event);
    }

    @Override
    public CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionEvent event) {
        return logChange(ChangeBase.updateSubscription(event.getMetadata()), event);
    }

    /**
     * Returns the changelog of the given environment, sorted chronologically (via ChangeData's natural order).
     * Unknown environment IDs yield an empty list.
     */
    @Override
    public List<ChangeData> getChangeLog(String environmentId) {
        return kafkaClusters.getEnvironment(environmentId)
                .map(cluster -> getRepository(cluster).getObjects().stream().sorted().collect(Collectors.toList()))
                .orElse(Collections.emptyList());
    }

    /**
     * Wraps the change in a ChangeData entry (ID, timestamp, principal from the event context), logs it, and persists
     * it in the cluster's changelog repository.
     */
    private CompletableFuture<Void> logChange(Change change, AbstractGalapagosEvent event) {
        Optional<AuditPrincipal> principal = event.getContext().getContextValue("principal");
        ChangeData data = toChangeData(change, principal);
        try {
            log.info("CHANGE on environment " + event.getContext().getKafkaCluster().getId() + ": "
                    + JsonUtil.newObjectMapper().writeValueAsString(data));
        }
        catch (JsonProcessingException e) {
            // Logging is best-effort only; the change is still persisted below.
            log.error("Could not log change", e);
        }

        return getRepository(event.getContext().getKafkaCluster()).save(data);
    }

    private TopicBasedRepository<ChangeData> getRepository(KafkaCluster kafkaCluster) {
        return kafkaCluster.getRepository("changelog", ChangeData.class);
    }

    /**
     * Builds the persisted changelog entry; changes without a principal are attributed to "_SYSTEM", and the full name
     * falls back to the principal name when unavailable.
     */
    private ChangeData toChangeData(Change change, Optional<AuditPrincipal> principal) {
        ChangeData data = new ChangeData();
        data.setId(UUID.randomUUID().toString());
        data.setPrincipal(principal.map(p -> p.getName()).orElse("_SYSTEM"));
        data.setPrincipalFullName(principal.filter(p -> p.getFullName() != null).map(AuditPrincipal::getFullName)
                .orElse(data.getPrincipal()));
        data.setTimestamp(ZonedDateTime.now());
        data.setChange(change);
        return data;
    }

}
6,533
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicCreateParamsDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/impl/TopicCreateParamsDto.java
package com.hermesworld.ais.galapagos.changes.impl;

import java.util.Collections;
import java.util.Map;
import java.util.stream.Collectors;

import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;

import lombok.Getter;
import lombok.Setter;

/**
 * JSON-serializable snapshot of the parameters used to create a topic (partition count, replication factor, and topic
 * configuration). Converts back and forth between this DTO form and {@link TopicCreateParams}.
 */
@JsonSerialize
@Getter
@Setter
final class TopicCreateParamsDto {

    private int numberOfPartitions;

    private int replicationFactor;

    private Map<String, Object> topicConfigs;

    public TopicCreateParamsDto() {
    }

    public TopicCreateParamsDto(TopicCreateParams params) {
        this.numberOfPartitions = params.getNumberOfPartitions();
        this.replicationFactor = params.getReplicationFactor();
        // Snapshot the config map so later modifications of the source params do not affect this DTO.
        this.topicConfigs = params.getTopicConfigs().entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    /**
     * Returns the topic configuration with all values rendered as Strings; an empty map if no configs are set.
     */
    public Map<String, String> topicConfigsAsStringMap() {
        if (topicConfigs == null) {
            return Collections.emptyMap();
        }
        return topicConfigs.entrySet().stream().collect(
                Collectors.toMap(Map.Entry::getKey, e -> e.getValue() == null ? null : e.getValue().toString()));
    }

    /**
     * Converts this DTO back into a {@link TopicCreateParams} instance.
     */
    public TopicCreateParams toTopicCreateParams() {
        return new TopicCreateParams(numberOfPartitions, replicationFactor, topicConfigsAsStringMap());
    }

}
1,365
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ChangeBase.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/changes/impl/ChangeBase.java
package com.hermesworld.ais.galapagos.changes.impl;

import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.changes.ApplicableChange;
import com.hermesworld.ais.galapagos.changes.ApplyChangeContext;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.changes.ChangeType;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.topics.SchemaCompatCheckMode;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;

import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;

/**
 * Base class for all concrete change implementations in this file. Provides the change type, an equals/hashCode
 * contract delegating to the subclass's semantic {@link #isEqualTo(ChangeBase)}, and static factory methods to
 * construct all known change types.
 */
public abstract class ChangeBase implements ApplicableChange {

    // The "internalTopic" flag on some change classes is used for GUI filtering purposes only!

    private final ChangeType changeType;

    ChangeBase(ChangeType changeType) {
        this.changeType = changeType;
    }

    @Override
    public ChangeType getChangeType() {
        return changeType;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj == this) {
            return true;
        }
        // exact class match required; semantic comparison is delegated to the subclass
        if (obj.getClass() != getClass()) {
            return false;
        }
        return isEqualTo((ChangeBase) obj);
    }

    @Override
    public int hashCode() {
        // intentionally coarse: only the change type is hashed; equals() does the fine-grained comparison
        return changeType.hashCode();
    }

    /**
     * Implementation notice: It is enough to check for a "semantical" equality, e.g. compare the topic name only for a
     * <code>TOPIC_CREATED</code> change. You can also safely cast the parameter to your exact change class.
     *
     * @param other Change to compare this change to.
     *
     * @return <code>true</code> if this change is semantically equal to the given change, <code>false</code> otherwise.
     */
    protected abstract boolean isEqualTo(ChangeBase other);

    // ---- static factory methods for all known change types ----

    public static ChangeBase createTopic(TopicMetadata topicMetadata, TopicCreateParams createParams) {
        return new CreateTopicChange(topicMetadata, createParams);
    }

    public static ChangeBase subscribeTopic(SubscriptionMetadata subscriptionMetadata) {
        return new SubscribeToTopicChange(subscriptionMetadata);
    }

    public static ChangeBase deleteTopic(String topicName, boolean internalTopic) {
        return new DeleteTopicChange(topicName, internalTopic);
    }

    public static ChangeBase unsubscribeTopic(SubscriptionMetadata subscriptionMetadata) {
        return new UnsubscribeFromTopicChange(subscriptionMetadata);
    }

    public static ChangeBase updateSubscription(SubscriptionMetadata subscriptionMetadata) {
        return new UpdateSubscriptionChange(subscriptionMetadata);
    }

    public static ChangeBase updateTopicDescription(String topicName, String newDescription, boolean internalTopic) {
        return new UpdateTopicDescriptionChange(topicName, newDescription, internalTopic);
    }

    public static ChangeBase markTopicDeprecated(String topicName, String deprecationText, LocalDate eolDate) {
        return new DeprecateTopicChange(topicName, deprecationText, eolDate);
    }

    public static ChangeBase unmarkTopicDeprecated(String topicName) {
        return new UndeprecateTopicChange(topicName);
    }

    public static ChangeBase updateTopicSubscriptionApprovalRequiredFlag(String topicName,
            boolean subscriptionApprovalRequired) {
        return new UpdateSubscriptionApprovalRequiredFlagChange(topicName, subscriptionApprovalRequired);
    }

    public static ChangeBase publishTopicSchemaVersion(String topicName, SchemaMetadata schemaVersion) {
        return new PublishTopicSchemaVersionChange(topicName, schemaVersion);
    }

    public static ChangeBase deleteTopicSchemaVersion(String topicName) {
        return new DeleteTopicSchemaVersionChange(topicName);
    }

    public static ChangeBase addTopicProducer(String topicName, String producerApplicationId) {
        return new TopicProducerAddChange(topicName, producerApplicationId);
    }

    public static ChangeBase removeTopicProducer(String topicName, String producerApplicationId) {
        return new TopicProducerRemoveChange(topicName, producerApplicationId);
    }

    // NOTE(review): the parameter is stored as "previousOwnerApplicationId" in TopicOwnerChange —
    // confirm that callers pass the previous owner's application ID here, despite the parameter name.
    public static ChangeBase changeTopicOwner(String topicName, String producerApplicationId) {
        return new TopicOwnerChange(topicName, producerApplicationId);
    }

    /**
     * @deprecated Is no longer signalled by a "change", but as a Galapagos Event.
     */
    @Deprecated
    public static Change registerApplication(String applicationId, ApplicationMetadata metadata) {
        return new RegisterApplicationChange(applicationId, metadata);
    }

    /**
     * Creates a new "compound change" which consists of one main change and a (potentially empty) list of additional
     * changes. When the change is applied to an environment, the main change is applied first, the additional changes
     * afterwards (in the order of the list). The returned future of the <code>applyTo</code> method of this change
     * returns when <b>all</b> changes are complete, or fails if <b>any</b> change fails. Note that some changes may
     * have been applied successfully to the target environment even in case of failure of the future.
     *
     * @param mainChange        Main change of the new "compound change"
     * @param additionalChanges Additional changes of the new "compound change"
     *
     * @return A change which consists of all the given changes and behaves like described above.
     */
    public static ChangeBase compoundChange(ChangeBase mainChange, List<ChangeBase> additionalChanges) {
        return new CompoundChange(mainChange, additionalChanges);
    }
}

// Change: a topic was created. Staging re-creates the topic on the target environment.
@JsonSerialize
final class CreateTopicChange extends ChangeBase {

    private final TopicMetadata topicMetadata;

    private final TopicCreateParamsDto createParams;

    CreateTopicChange(TopicMetadata topicMetadata, TopicCreateParams createParams) {
        super(ChangeType.TOPIC_CREATED);
        // defensive copies: change objects are persisted and must not alias live metadata
        this.topicMetadata = new TopicMetadata(topicMetadata);
        this.createParams = new TopicCreateParamsDto(createParams);
    }

    public TopicMetadata getTopicMetadata() {
        return topicMetadata;
    }

    public TopicCreateParamsDto getCreateParams() {
        return createParams;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        CreateTopicChange change = (CreateTopicChange) other;
        // semantic equality: topic name only (see isEqualTo() contract in ChangeBase)
        return Objects.equals(topicMetadata.getName(), change.topicMetadata.getName());
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getTopicService().createTopic(context.getTargetEnvironmentId(), topicMetadata,
                createParams.getNumberOfPartitions(), createParams.topicConfigsAsStringMap());
    }
}

// Change: a topic was deleted. Staging deletes the topic on the target environment.
@JsonSerialize
final class DeleteTopicChange extends ChangeBase {

    private final String topicName;

    private final boolean internalTopic;

    public DeleteTopicChange(String topicName, boolean internalTopic) {
        super(ChangeType.TOPIC_DELETED);
        this.topicName = topicName;
        this.internalTopic = internalTopic;
    }

    public String getTopicName() {
        return topicName;
    }

    public boolean isInternalTopic() {
        return internalTopic;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((DeleteTopicChange) other).topicName);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getTopicService().deleteTopic(context.getTargetEnvironmentId(), topicName);
    }
}

// Change: an application subscribed to a topic. Staging adds the same subscription on the target environment.
@JsonSerialize
final class SubscribeToTopicChange extends ChangeBase {

    private final SubscriptionMetadata subscriptionMetadata;

    SubscribeToTopicChange(SubscriptionMetadata subscriptionMetadata) {
        super(ChangeType.TOPIC_SUBSCRIBED);
        // defensive copy (see CreateTopicChange)
        this.subscriptionMetadata = new SubscriptionMetadata(subscriptionMetadata);
    }

    public SubscriptionMetadata getSubscriptionMetadata() {
        return subscriptionMetadata;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        SubscribeToTopicChange change = (SubscribeToTopicChange) other;
        // semantic identity of a subscription: (client application, topic)
        return Objects.equals(subscriptionMetadata.getClientApplicationId(),
                change.subscriptionMetadata.getClientApplicationId())
                && Objects.equals(subscriptionMetadata.getTopicName(), change.subscriptionMetadata.getTopicName());
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getSubscriptionService().addSubscription(context.getTargetEnvironmentId(),
                subscriptionMetadata);
    }
}

// Change: a subscription's state was updated (e.g. approval). Cannot be staged to another environment.
@JsonSerialize
final class UpdateSubscriptionChange extends ChangeBase {

    UpdateSubscriptionChange(SubscriptionMetadata subscriptionMetadata) {
        super(ChangeType.TOPIC_SUBSCRIPTION_UPDATED);
        this.subscriptionMetadata = new SubscriptionMetadata(subscriptionMetadata);
    }

    // NOTE(review): field declared after the constructor in the original source; order preserved here.
    private final SubscriptionMetadata subscriptionMetadata;

    public SubscriptionMetadata getSubscriptionMetadata() {
        return subscriptionMetadata;
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        throw new UnsupportedOperationException("Cannot stage subscription state updates");
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        UpdateSubscriptionChange change = (UpdateSubscriptionChange) other;
        // unlike SubscribeToTopicChange, the subscription state is part of the semantic identity here
        return Objects.equals(subscriptionMetadata.getClientApplicationId(),
                change.subscriptionMetadata.getClientApplicationId())
                && Objects.equals(subscriptionMetadata.getTopicName(), change.subscriptionMetadata.getTopicName())
                && subscriptionMetadata.getState() == change.subscriptionMetadata.getState();
    }
}

// Change: an application unsubscribed from a topic. Staging deletes the matching subscription on the target.
@JsonSerialize
final class UnsubscribeFromTopicChange extends ChangeBase {

    private final SubscriptionMetadata subscriptionMetadata;

    UnsubscribeFromTopicChange(SubscriptionMetadata subscriptionMetadata) {
        super(ChangeType.TOPIC_UNSUBSCRIBED);
        this.subscriptionMetadata = new SubscriptionMetadata(subscriptionMetadata);
    }

    public SubscriptionMetadata getSubscriptionMetadata() {
        return subscriptionMetadata;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        UnsubscribeFromTopicChange change = (UnsubscribeFromTopicChange) other;
        return Objects.equals(subscriptionMetadata.getClientApplicationId(),
                change.subscriptionMetadata.getClientApplicationId())
                && Objects.equals(subscriptionMetadata.getTopicName(), change.subscriptionMetadata.getTopicName());
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        String applId = subscriptionMetadata.getClientApplicationId();
        String topicName = subscriptionMetadata.getTopicName();

        // the subscription ID differs per environment, so look up the target's subscription by (application, topic)
        SubscriptionMetadata toDelete = context.getSubscriptionService()
                .getSubscriptionsOfApplication(context.getTargetEnvironmentId(), applId, true).stream()
                .filter(m -> topicName.equals(m.getTopicName())).findAny().orElse(null);

        if (toDelete == null) {
            return CompletableFuture.failedFuture(new NoSuchElementException(
                    "No subscription of this application to topic " + topicName + " found on target environment."));
        }

        return context.getSubscriptionService().deleteSubscription(context.getTargetEnvironmentId(),
                toDelete.getId());
    }
}

// Change: a topic's description text was updated.
@JsonSerialize
final class UpdateTopicDescriptionChange extends ChangeBase {

    private final String topicName;

    private final String newDescription;

    private final boolean internalTopic;

    public UpdateTopicDescriptionChange(String topicName, String newDescription, boolean internalTopic) {
        super(ChangeType.TOPIC_DESCRIPTION_CHANGED);
        this.topicName = topicName;
        this.newDescription = newDescription;
        this.internalTopic = internalTopic;
    }

    public String getTopicName() {
        return topicName;
    }

    public String getNewDescription() {
        return newDescription;
    }

    public boolean isInternalTopic() {
        return internalTopic;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        // topic name only: a newer description change supersedes an older one for the same topic
        return Objects.equals(topicName, ((UpdateTopicDescriptionChange) other).topicName);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getTopicService().updateTopicDescription(context.getTargetEnvironmentId(), topicName,
                newDescription);
    }
}

// Change: an application was added as a producer to a topic.
@JsonSerialize
final class TopicProducerAddChange extends ChangeBase {

    private final String topicName;

    private final String producerApplicationId;

    public TopicProducerAddChange(String topicName, String producerApplicationId) {
        super(ChangeType.TOPIC_PRODUCER_APPLICATION_ADDED);
        this.topicName = topicName;
        this.producerApplicationId = producerApplicationId;
    }

    public String getTopicName() {
        return topicName;
    }

    public String getProducerApplicationId() {
        return producerApplicationId;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((TopicProducerAddChange) other).topicName)
                && producerApplicationId.equals(((TopicProducerAddChange) other).producerApplicationId);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getTopicService().addTopicProducer(context.getTargetEnvironmentId(), topicName,
                producerApplicationId);
    }
}

// Change: topic ownership was transferred. Recorded for the change log only; cannot be staged.
@JsonSerialize
final class TopicOwnerChange extends ChangeBase {

    private final String topicName;

    private final String previousOwnerApplicationId;

    public TopicOwnerChange(String topicName, String previousOwnerApplicationId) {
        super(ChangeType.TOPIC_OWNER_CHANGED);
        this.topicName = topicName;
        this.previousOwnerApplicationId = previousOwnerApplicationId;
    }

    public String getTopicName() {
        return topicName;
    }

    public String getPreviousOwnerApplicationId() {
        return previousOwnerApplicationId;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((TopicOwnerChange) other).topicName)
                && previousOwnerApplicationId.equals(((TopicOwnerChange) other).previousOwnerApplicationId);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        throw new UnsupportedOperationException("Topic Owner changes cannot be applied");
    }
}

// Change: an application was removed as a producer from a topic.
@JsonSerialize
final class TopicProducerRemoveChange extends ChangeBase {

    private final String topicName;

    private final String producerApplicationId;

    public TopicProducerRemoveChange(String topicName, String producerApplicationId) {
        super(ChangeType.TOPIC_PRODUCER_APPLICATION_REMOVED);
        this.topicName = topicName;
        this.producerApplicationId = producerApplicationId;
    }

    public String getTopicName() {
        return topicName;
    }

    public String getProducerApplicationId() {
        return producerApplicationId;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((TopicProducerRemoveChange) other).topicName)
                && producerApplicationId.equals(((TopicProducerRemoveChange) other).producerApplicationId);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getTopicService().removeTopicProducer(context.getTargetEnvironmentId(), topicName,
                producerApplicationId);
    }
}

// Change: a topic was marked as deprecated. Recorded for the change log only; cannot be staged.
@JsonSerialize
final class DeprecateTopicChange extends ChangeBase {

    private final String topicName;

    private final String deprecationText;

    // serialized as an ISO date string, not as a timestamp
    @JsonFormat(shape = JsonFormat.Shape.STRING)
    private final LocalDate eolDate;

    public DeprecateTopicChange(String topicName, String deprecationText, LocalDate eolDate) {
        super(ChangeType.TOPIC_DEPRECATED);
        this.topicName = topicName;
        this.deprecationText = deprecationText;
        this.eolDate = eolDate;
    }

    public String getTopicName() {
        return topicName;
    }

    public String getDeprecationText() {
        return deprecationText;
    }

    public LocalDate getEolDate() {
        return eolDate;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((DeprecateTopicChange) other).topicName);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        throw new UnsupportedOperationException("Deprecation changes cannot be applied");
    }
}

// Change: a topic's deprecation mark was removed. Recorded for the change log only; cannot be staged.
@JsonSerialize
final class UndeprecateTopicChange extends ChangeBase {

    private final String topicName;

    public UndeprecateTopicChange(String topicName) {
        super(ChangeType.TOPIC_UNDEPRECATED);
        this.topicName = topicName;
    }

    public String getTopicName() {
        return topicName;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((UndeprecateTopicChange) other).topicName);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        throw new UnsupportedOperationException("Deprecation changes cannot be applied");
    }
}

// Change: the "subscription approval required" flag of a topic was toggled.
@JsonSerialize
final class UpdateSubscriptionApprovalRequiredFlagChange extends ChangeBase {

    private final String topicName;

    private final boolean subscriptionApprovalRequired;

    public UpdateSubscriptionApprovalRequiredFlagChange(String topicName, boolean subscriptionApprovalRequired) {
        super(ChangeType.TOPIC_SUBSCRIPTION_APPROVAL_REQUIRED_FLAG_UPDATED);
        this.topicName = topicName;
        this.subscriptionApprovalRequired = subscriptionApprovalRequired;
    }

    public String getTopicName() {
        return topicName;
    }

    public boolean isSubscriptionApprovalRequired() {
        return subscriptionApprovalRequired;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        return Objects.equals(topicName, ((UpdateSubscriptionApprovalRequiredFlagChange) other).topicName)
                && subscriptionApprovalRequired == ((UpdateSubscriptionApprovalRequiredFlagChange) other).subscriptionApprovalRequired;
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        return context.getTopicService().setSubscriptionApprovalRequiredFlag(context.getTargetEnvironmentId(),
                topicName, subscriptionApprovalRequired);
    }
}

// Change: a new schema version was published for a topic. Staging republishes the schema without a compat check.
@JsonSerialize
final class PublishTopicSchemaVersionChange extends ChangeBase {

    private final String topicName;

    private final SchemaMetadata schemaMetadata;

    public PublishTopicSchemaVersionChange(String topicName, SchemaMetadata schemaMetadata) {
        super(ChangeType.TOPIC_SCHEMA_VERSION_PUBLISHED);
        this.topicName = topicName;
        this.schemaMetadata = schemaMetadata;
    }

    public String getTopicName() {
        return topicName;
    }

    public SchemaMetadata getSchemaMetadata() {
        return schemaMetadata;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        PublishTopicSchemaVersionChange change = (PublishTopicSchemaVersionChange) other;
        return Objects.equals(topicName, change.topicName)
                && schemaMetadata.getSchemaVersion() == change.schemaMetadata.getSchemaVersion();
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        // SKIP_SCHEMA_CHECK: the schema was already validated on the source environment
        return context.getTopicService().addTopicSchemaVersion(context.getTargetEnvironmentId(), schemaMetadata,
                SchemaCompatCheckMode.SKIP_SCHEMA_CHECK);
    }
}

// Change: a schema version was deleted. Recorded for the change log only; cannot be staged.
@JsonSerialize
final class DeleteTopicSchemaVersionChange extends ChangeBase {

    private final String topicName;

    public DeleteTopicSchemaVersionChange(String topicName) {
        super(ChangeType.TOPIC_SCHEMA_VERSION_DELETED);
        this.topicName = topicName;
    }

    public String getTopicName() {
        return topicName;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        // intentionally never equal to any other change — presumably each schema deletion is treated as
        // a distinct change; TODO confirm against staging logic
        return false;
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        throw new UnsupportedOperationException("Schema deletion cannot be staged");
    }
}

// Legacy change: an application was registered. Kept for deserializing old change log entries only.
@JsonSerialize
final class RegisterApplicationChange extends ChangeBase {

    private final String applicationId;

    private final ApplicationMetadata applicationMetadata;

    @SuppressWarnings("deprecation")
    public RegisterApplicationChange(String applicationId, ApplicationMetadata applicationMetadata) {
        super(ChangeType.APPLICATION_REGISTERED);
        this.applicationId = applicationId;
        this.applicationMetadata = applicationMetadata;
    }

    public String getApplicationId() {
        return applicationId;
    }

    public ApplicationMetadata getApplicationMetadata() {
        return applicationMetadata;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        // will never be used during staging (and should not be used at all) - so never equal to anything
        return false;
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        throw new UnsupportedOperationException("Application registrations cannot be staged");
    }
}

// A main change plus additional changes, applied sequentially. See ChangeBase.compoundChange() for semantics.
@JsonSerialize
final class CompoundChange extends ChangeBase {

    private final ChangeBase mainChange;

    private final List<ChangeBase> additionalChanges;

    public CompoundChange(ChangeBase mainChange, List<ChangeBase> additionalChanges) {
        super(ChangeType.COMPOUND_CHANGE);
        this.mainChange = mainChange;
        // defensive copy of the caller's list
        this.additionalChanges = new ArrayList<>(additionalChanges);
    }

    public ChangeBase getMainChange() {
        return mainChange;
    }

    public List<ChangeBase> getAdditionalChanges() {
        return additionalChanges;
    }

    @Override
    protected boolean isEqualTo(ChangeBase other) {
        CompoundChange change = (CompoundChange) other;
        return mainChange.equals(change.mainChange) && additionalChanges.equals(change.additionalChanges);
    }

    @Override
    public CompletableFuture<?> applyTo(ApplyChangeContext context) {
        // main change first, then the additional changes strictly in list order;
        // a failure short-circuits the remaining changes (already applied ones are NOT rolled back)
        CompletableFuture<?> result = mainChange.applyTo(context);
        for (ChangeBase change : additionalChanges) {
            result = result.thenCompose(o -> change.applyTo(context));
        }
        return result;
    }
}
23,167
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
InvalidTopicNameException.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/InvalidTopicNameException.java
package com.hermesworld.ais.galapagos.naming; public class InvalidTopicNameException extends Exception { private static final long serialVersionUID = -3480234944361435273L; public InvalidTopicNameException(String message) { super(message); } }
268
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
NamingService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/NamingService.java
package com.hermesworld.ais.galapagos.naming;

import com.hermesworld.ais.galapagos.applications.BusinessCapability;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.topics.TopicType;

/**
 * Service for determining and validating names of Kafka objects like Topics, Consumer Groups, and Transactional IDs.
 */
public interface NamingService {

    /**
     * Normalizes the given name, for use in naming objects (but not e.g. for use in certificate DNs). Adheres to the
     * configured normalization strategy. Localized special characters like Ä, Ö, É etc are normalized using ICU4J and
     * the <code>de-ASCII</code> transliteration. Spaces and special characters are treated as word separators. Adjacent
     * word separators are combined to one. The normalization strategy determines how the resulting words are combined.
     *
     * @param name Name to normalize for use in naming objects.
     * @return Normalized Name.
     * @throws NullPointerException If name is <code>null</code>.
     */
    String normalize(String name);

    /**
     * Suggests a topic name for the given topic type, application, and business capability, based on the configured
     * naming formats.
     *
     * @param topicType   Type of the topic to suggest a name for.
     * @param application Application which shall own the topic.
     * @param capability  Business capability the topic belongs to; may be irrelevant for internal topics.
     * @return A suggested topic name, or <code>null</code> if no suggestion can be determined (e.g. when no business
     *         capability is given for an API topic).
     */
    String getTopicNameSuggestion(TopicType topicType, KnownApplication application, BusinessCapability capability);

    /**
     * Determines the name prefixes (for internal topics, consumer groups, and transactional IDs) which the given
     * application is allowed to use, according to the configured naming formats.
     *
     * @param application Application to determine the allowed prefixes for.
     * @return The allowed prefixes for the application (never <code>null</code>).
     */
    ApplicationPrefixes getAllowedPrefixes(KnownApplication application);

    /**
     * Validates the given topic name against the naming rules configured for the given topic type and application.
     *
     * @param topicName   Topic name to validate.
     * @param topicType   Type of the topic.
     * @param application Application which shall own the topic.
     * @throws InvalidTopicNameException If the topic name does not comply with the configured naming rules.
     */
    void validateTopicName(String topicName, TopicType topicType, KnownApplication application)
            throws InvalidTopicNameException;
}
1,440
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationPrefixes.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/ApplicationPrefixes.java
package com.hermesworld.ais.galapagos.naming; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; public interface ApplicationPrefixes { ApplicationPrefixes EMPTY = new ApplicationPrefixes() { @Override public List<String> getInternalTopicPrefixes() { return List.of(); } @Override public List<String> getConsumerGroupPrefixes() { return List.of(); } @Override public List<String> getTransactionIdPrefixes() { return List.of(); } }; List<String> getInternalTopicPrefixes(); List<String> getConsumerGroupPrefixes(); List<String> getTransactionIdPrefixes(); /** * Combines the prefixes set in this object with the prefixes set in the given other object, returning the "union" * of both, but removing duplicate prefixes. * * @param other Other set of prefixes to combine with this set. * @return A "union" of both sets of prefixes, without duplicates in its lists. */ default ApplicationPrefixes combineWith(ApplicationPrefixes other) { ApplicationPrefixes thisObj = this; return new ApplicationPrefixes() { @Override public List<String> getInternalTopicPrefixes() { return Stream .concat(thisObj.getInternalTopicPrefixes().stream(), other.getInternalTopicPrefixes().stream()) .distinct().collect(Collectors.toList()); } @Override public List<String> getConsumerGroupPrefixes() { return Stream .concat(thisObj.getConsumerGroupPrefixes().stream(), other.getConsumerGroupPrefixes().stream()) .distinct().collect(Collectors.toList()); } @Override public List<String> getTransactionIdPrefixes() { return Stream .concat(thisObj.getTransactionIdPrefixes().stream(), other.getTransactionIdPrefixes().stream()) .distinct().collect(Collectors.toList()); } }; } }
2,198
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CaseStrategy.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/config/CaseStrategy.java
package com.hermesworld.ais.galapagos.naming.config;

import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

import java.util.List;
import java.util.Locale;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * The different Case strategies which can be used at multiple locations in the naming configuration. <br>
 * Each strategy can both validate a String against its pattern and format a list of words according to its rules. <br>
 * See <code>application.properties</code> for examples and default values.
 */
public enum CaseStrategy {

    PASCAL_CASE("PascalCase", "([A-Z][a-z0-9]+)+",
            parts -> parts.stream().map(part -> StringUtils.capitalize(part.toLowerCase(Locale.US)))
                    .collect(Collectors.joining())),

    CAMEL_CASE("camelCase", "[a-z]+[a-z0-9]*([A-Z][a-z0-9]+)*",
            parts -> parts.get(0).toLowerCase(Locale.US) + parts.subList(1, parts.size()).stream()
                    .map(part -> StringUtils.capitalize(part.toLowerCase(Locale.US))).collect(Collectors.joining())),

    KEBAB_CASE("kebab-case", "([a-z][a-z0-9]*|[a-z][a-z0-9]*\\-[a-z0-9]*)+",
            parts -> parts.stream().map(part -> part.toLowerCase(Locale.US)).collect(Collectors.joining("-"))),

    LOWERCASE("lowercase", "[a-z][a-z0-9]*",
            parts -> parts.stream().map(part -> part.toLowerCase(Locale.US)).collect(Collectors.joining())),

    SNAKE_CASE("SNAKE_CASE", "([A-Z][A-Z0-9]*|[A-Z][A-Z0-9]*_[A-Z0-9]*)+",
            parts -> parts.stream().map(part -> part.toUpperCase(Locale.US)).collect(Collectors.joining("_")));

    // value as written in the configuration file (e.g. "PascalCase")
    private final String configValue;

    // regex which a String must match to conform to this strategy
    private final String validatorRegex;

    // combines a list of words into a single name according to this strategy
    private final Function<List<String>, String> formatter;

    CaseStrategy(String configValue, String validatorRegex, Function<List<String>, String> formatter) {
        this.configValue = configValue;
        this.validatorRegex = validatorRegex;
        this.formatter = formatter;
    }

    /**
     * @return The value representing this strategy in configuration files.
     */
    public String configValue() {
        return configValue;
    }

    /**
     * Checks if the given input conforms to this case strategy. Empty or <code>null</code> input never matches.
     *
     * @param input Input to check.
     * @return <code>true</code> if the input matches this strategy's pattern, <code>false</code> otherwise.
     */
    public boolean matches(String input) {
        return !ObjectUtils.isEmpty(input) && input.matches(validatorRegex);
    }

    /**
     * Formats the given list of words as a single name according to this case strategy.
     *
     * @param words Words to combine.
     * @return The formatted name.
     */
    public String format(List<String> words) {
        return formatter.apply(words);
    }
}
2,360
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicNamingConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/config/TopicNamingConfig.java
package com.hermesworld.ais.galapagos.naming.config;

// redundant same-package import of AdditionNamingRules removed

import lombok.Getter;
import lombok.Setter;

/**
 * Represents the naming schema for one type of API Topics (Events, Commands, or Data).
 */
@Getter
@Setter
public class TopicNamingConfig {

    // Format string for topic names of this type; placeholders are defined in NamingConfig
    // (e.g. {application}, {business-capability}, {addition}).
    private String nameFormat;

    // Rules constraining the free-form "addition" part after the prefix given by the format.
    private AdditionNamingRules additionRules;
}
399
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CaseStrategyConverterBinding.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/config/CaseStrategyConverterBinding.java
package com.hermesworld.ais.galapagos.naming.config; import org.springframework.boot.context.properties.ConfigurationPropertiesBinding; import org.springframework.core.convert.converter.Converter; import org.springframework.lang.NonNull; import org.springframework.stereotype.Component; import java.util.Arrays; @Component @ConfigurationPropertiesBinding public class CaseStrategyConverterBinding implements Converter<String, CaseStrategy> { @Override public CaseStrategy convert(@NonNull String source) { return Arrays.stream(CaseStrategy.values()).filter(strategy -> strategy.configValue().equals(source)).findAny() .orElseThrow(() -> new IllegalArgumentException("Invalid case strategy: " + source)); } }
748
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AdditionNamingRules.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/config/AdditionNamingRules.java
package com.hermesworld.ais.galapagos.naming.config;

import lombok.Getter;
import lombok.Setter;

/**
 * Represents the naming rules applying to the "addition" in the name of the API topic, so the "free" part after the
 * prefix given by naming configuration.
 */
@Getter
@Setter
public class AdditionNamingRules {

    // each flag permits the corresponding CaseStrategy for the addition part of the topic name
    private boolean allowPascalCase;

    private boolean allowCamelCase;

    private boolean allowKebabCase;

    private boolean allowSnakeCase;

    // characters allowed as word separators within the addition; validated by NamingConfig against
    // the set of characters valid in Kafka topic names (dots, underscores, hyphens)
    private String allowedSeparators;
}
507
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
NamingConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/config/NamingConfig.java
package com.hermesworld.ais.galapagos.naming.config;

import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.lang.NonNull;
import org.springframework.util.StringUtils;
import org.springframework.validation.Errors;
import org.springframework.validation.Validator;
import org.springframework.validation.annotation.Validated;

/**
 * Configuration properties for the Galapagos naming rules (<code>galapagos.naming</code> prefix). Also acts as a
 * Spring {@link Validator} for its own String format properties and for {@link AdditionNamingRules} instances.
 */
@Configuration
@ConfigurationProperties("galapagos.naming")
@Validated
@Setter
@Getter
public class NamingConfig implements Validator {

    // placeholders usable in the *-format configuration properties
    public static final String PARAM_APPLICATION = "{application}";

    public static final String PARAM_APP_OR_ALIAS = "{app-or-alias}";

    public static final String PARAM_BUSINESS_CAPABILITY = "{business-capability}";

    public static final String PARAM_ADDITION = "{addition}";

    // characters Kafka accepts in topic / consumer group / transactional ID names
    public static final String KAFKA_VALID_NAMES_REGEX = "[a-zA-Z0-9._\\-]+";

    // naming schema per API topic type
    private TopicNamingConfig events = new TopicNamingConfig();

    private TopicNamingConfig commands = new TopicNamingConfig();

    private TopicNamingConfig data = new TopicNamingConfig();

    private String internalTopicPrefixFormat;

    private String consumerGroupPrefixFormat;

    private String transactionalIdPrefixFormat;

    private boolean allowInternalTopicNamesAsConsumerGroups;

    // strategy used by NamingService.normalize() when combining words into a name
    private CaseStrategy normalizationStrategy;

    @Override
    public boolean supports(@NonNull Class<?> clazz) {
        // validates the String "*-format" properties and nested AdditionNamingRules objects
        return clazz == String.class || clazz == AdditionNamingRules.class;
    }

    @Override
    public void validate(@NonNull Object target, @NonNull Errors errors) {
        String objName = errors.getObjectName();
        // the object name ends with "-format" exactly for the format String properties above
        if (target instanceof String && objName.endsWith("-format")) {
            checkValidFormat(target.toString(), errors);
        }
        else if (target instanceof AdditionNamingRules rules) {
            // separators become part of topic names, so they must themselves be Kafka-valid characters
            if (StringUtils.hasLength(rules.getAllowedSeparators())
                    && !rules.getAllowedSeparators().matches(KAFKA_VALID_NAMES_REGEX)) {
                errors.rejectValue("allowedSeparators", "invalid.value",
                        "The separators must be valid for use in Kafka Topic Names. Only dots, underscores, and hyphens are allowed.");
            }
        }
    }

    // Validates a single format string: {addition} may only appear at the very end, and with all
    // placeholders substituted, the result must only contain Kafka-valid name characters.
    private void checkValidFormat(String format, Errors errors) {
        if (format.contains(PARAM_ADDITION) && !format.endsWith(PARAM_ADDITION)) {
            errors.rejectValue(null, "invalid.addition.position",
                    errors.getObjectName() + " must END with placeholder " + PARAM_ADDITION);
            return;
        }

        // substitute a harmless sample value for every placeholder, then check the remaining characters
        format = format.replace(PARAM_APPLICATION, "test").replace(PARAM_APP_OR_ALIAS, "test")
                .replace(PARAM_BUSINESS_CAPABILITY, "test").replace(PARAM_ADDITION, "test");

        if (!format.matches(KAFKA_VALID_NAMES_REGEX)) {
            errors.rejectValue(null, "invalid.format",
                    errors.getObjectName() + " contains invalid characters for Kafka object names.");
        }
    }
}
3,077
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationPrefixesImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/impl/ApplicationPrefixesImpl.java
package com.hermesworld.ais.galapagos.naming.impl;

import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import lombok.Getter;
import org.springframework.lang.NonNull;

import java.util.List;

/**
 * Immutable value object carrying the name prefixes an application is allowed to use for internal topics, consumer
 * groups, and transactional IDs.
 */
@Getter
public class ApplicationPrefixesImpl implements ApplicationPrefixes {

    private final List<String> internalTopicPrefixes;

    private final List<String> consumerGroupPrefixes;

    private final List<String> transactionIdPrefixes;

    public ApplicationPrefixesImpl(@NonNull List<String> internalTopicPrefixes,
            @NonNull List<String> consumerGroupPrefixes, @NonNull List<String> transactionIdPrefixes) {
        // snapshot all lists so this object stays immutable even if the caller's lists change afterwards
        this.transactionIdPrefixes = snapshot(transactionIdPrefixes);
        this.consumerGroupPrefixes = snapshot(consumerGroupPrefixes);
        this.internalTopicPrefixes = snapshot(internalTopicPrefixes);
    }

    /** Returns an unmodifiable copy of the given list. */
    private static List<String> snapshot(List<String> values) {
        return List.copyOf(values);
    }
}
861
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
NamingServiceImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/naming/impl/NamingServiceImpl.java
package com.hermesworld.ais.galapagos.naming.impl;

import com.hermesworld.ais.galapagos.applications.BusinessCapability;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.InvalidTopicNameException;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.naming.config.AdditionNamingRules;
import com.hermesworld.ais.galapagos.naming.config.CaseStrategy;
import com.hermesworld.ais.galapagos.naming.config.NamingConfig;
import com.hermesworld.ais.galapagos.naming.config.TopicNamingConfig;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.ibm.icu.text.Transliterator;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Default implementation of {@link NamingService}, based on the format strings and rules configured via
 * {@link NamingConfig} (properties prefix <code>galapagos.naming</code>).
 */
@Service
public class NamingServiceImpl implements NamingService {

    // placeholder inserted into name suggestions where the user is expected to fill in an own value
    private static final String DEFAULT_PLACEHOLDER = "My Topic";

    private final NamingConfig config;

    public NamingServiceImpl(NamingConfig config) {
        this.config = config;
    }

    @Override
    public String getTopicNameSuggestion(TopicType topicType, KnownApplication application,
            BusinessCapability capability) {
        // internal topics: suggest the application's first internal prefix plus a placeholder
        if (topicType == TopicType.INTERNAL) {
            return getAllowedPrefixes(application).getInternalTopicPrefixes().get(0) + normalize(DEFAULT_PLACEHOLDER);
        }
        // API topics require a business capability to build a name from
        if (capability == null) {
            return null;
        }

        TopicNamingConfig namingConfig = getTopicNamingConfig(topicType);
        if (namingConfig == null) {
            return null;
        }

        return formatSingle(namingConfig.getNameFormat(), application, capability).replace(NamingConfig.PARAM_ADDITION,
                normalize(DEFAULT_PLACEHOLDER));
    }

    @Override
    public ApplicationPrefixes getAllowedPrefixes(KnownApplication application) {
        List<String> internalTopicPrefixes = formatMulti(config.getInternalTopicPrefixFormat(), application, null);
        List<String> consumerGroupPrefixes = formatMulti(config.getConsumerGroupPrefixFormat(), application, null);

        // optionally, internal topic prefixes are also valid consumer group prefixes
        if (config.isAllowInternalTopicNamesAsConsumerGroups()) {
            consumerGroupPrefixes = Stream.concat(consumerGroupPrefixes.stream(), internalTopicPrefixes.stream())
                    .distinct().collect(Collectors.toList());
        }

        return new ApplicationPrefixesImpl(internalTopicPrefixes, consumerGroupPrefixes,
                formatMulti(config.getTransactionalIdPrefixFormat(), application, null));
    }

    @Override
    public void validateTopicName(String topicName, TopicType topicType, KnownApplication application)
            throws InvalidTopicNameException {
        // first of all, must be a valid Kafka Topic Name!
        if (topicName.length() > 249) {
            throw new InvalidTopicNameException("Topic name too long! Max length is 249 characters");
        }
        if (!topicName.matches(NamingConfig.KAFKA_VALID_NAMES_REGEX)) {
            throw new InvalidTopicNameException("Invalid Kafka Topic Name");
        }

        if (topicType == TopicType.INTERNAL) {
            List<String> prefixes = getAllowedPrefixes(application).getInternalTopicPrefixes();
            if (prefixes.stream().noneMatch(prefix -> topicName.startsWith(prefix))) {
                throw new InvalidTopicNameException("Wrong prefix used for internal topic of this application");
            }

            // OK; everything else does not matter for us
            return;
        }

        TopicNamingConfig namingConfig = getTopicNamingConfig(topicType);
        if (namingConfig == null) {
            throw new IllegalArgumentException("Invalid Topic Type: " + topicType);
        }

        // the name is valid if it matches the resolved format for ANY business capability of the application
        boolean anyMatch = false;
        for (BusinessCapability capability : application.getBusinessCapabilities()) {
            List<String> allowedFormats = formatMulti(namingConfig.getNameFormat(), application, capability);
            anyMatch = allowedFormats.stream()
                    .anyMatch(format -> matchesFormat(topicName, format, namingConfig.getAdditionRules()));
            if (anyMatch) {
                break;
            }
        }

        if (!anyMatch) {
            throw new InvalidTopicNameException("Invalid topic name: Does not adhere to topic naming convention");
        }
    }

    @Override
    public String normalize(String name) {
        CaseStrategy strategy = config.getNormalizationStrategy();
        if (strategy == null) {
            strategy = CaseStrategy.KEBAB_CASE;
        }

        // transliterate umlauts etc. to plain ASCII before applying the case strategy
        List<String> words = extractWords(Transliterator.getInstance("de-ASCII").transliterate(name));
        return strategy.format(words);
    }

    // Checks if the given topic name matches the given resolved format, applying the "addition" rules to the part
    // after the fixed prefix.
    private boolean matchesFormat(String topicName, String format, AdditionNamingRules additionRules) {
        if (!format.contains(NamingConfig.PARAM_ADDITION)) {
            // this must be a bad format - only one topic name per application or business capability?!
            return false;
        }

        String commonPrefix = format.substring(0, format.indexOf(NamingConfig.PARAM_ADDITION));
        if (!topicName.startsWith(commonPrefix)) {
            return false;
        }

        // split the addition at the allowed separators and validate each section on its own
        String addition = topicName.substring(commonPrefix.length());
        List<String> sections = !StringUtils.hasLength(additionRules.getAllowedSeparators()) ? List.of(addition)
                : Arrays.asList(
                        addition.split("[" + additionRules.getAllowedSeparators().replace("-", "\\-") + "]"));

        for (String section : sections) {
            if (!matchesSectionRules(section, additionRules)) {
                return false;
            }
        }

        return true;
    }

    // Checks if a single section of an "addition" matches at least one of the allowed case strategies.
    private boolean matchesSectionRules(String section, AdditionNamingRules rules) {
        if (CaseStrategy.PASCAL_CASE.matches(section) && rules.isAllowPascalCase()) {
            return true;
        }
        if (CaseStrategy.CAMEL_CASE.matches(section) && rules.isAllowCamelCase()) {
            return true;
        }
        if (CaseStrategy.KEBAB_CASE.matches(section) && rules.isAllowKebabCase()) {
            return true;
        }
        if (CaseStrategy.SNAKE_CASE.matches(section) && rules.isAllowSnakeCase()) {
            return true;
        }

        // Lowercase is OK, but only if NO other case is allowed (note that lowercase would be a subset of camelCase and
        // kebab-case, so no need to check them here)
        return CaseStrategy.LOWERCASE.matches(section) && !rules.isAllowSnakeCase() && !rules.isAllowCamelCase();
    }

    // Resolves all placeholders in the format string, using the application's primary name (aliases are not used).
    private String formatSingle(String formatString, KnownApplication application, BusinessCapability capability) {
        if (formatString.contains(NamingConfig.PARAM_APP_OR_ALIAS)) {
            formatString = formatString.replace(NamingConfig.PARAM_APP_OR_ALIAS, NamingConfig.PARAM_APPLICATION);
        }
        return formatString.replace(NamingConfig.PARAM_APPLICATION, normalize(application.getName())).replace(
                NamingConfig.PARAM_BUSINESS_CAPABILITY, capability == null ? "" : normalize(capability.getName()));
    }

    // Resolves the format string once per application name / alias if it contains {app-or-alias}; otherwise returns
    // a single resolved format.
    private List<String> formatMulti(String formatString, KnownApplication application,
            BusinessCapability capability) {
        if (!formatString.contains(NamingConfig.PARAM_APP_OR_ALIAS)) {
            return List.of(formatSingle(formatString, application, capability));
        }

        List<String> appOrAlias = new ArrayList<>();
        appOrAlias.add(normalize(application.getName()));
        application.getAliases().stream().map(alias -> normalize(alias)).forEach(appOrAlias::add);

        // NOTE(review): {application} is always resolved to the primary name (first list entry), while
        // {app-or-alias} iterates over name and aliases — confirm this asymmetry is intended.
        return appOrAlias.stream().map(name -> formatString.replace(NamingConfig.PARAM_APPLICATION, appOrAlias.get(0))
                .replace(NamingConfig.PARAM_APP_OR_ALIAS, name)).collect(Collectors.toList());
    }

    // Splits a (transliterated) name into words at every non-alphanumeric character; empty words are dropped.
    private List<String> extractWords(String name) {
        StringBuilder currentWord = new StringBuilder();
        List<String> result = new ArrayList<>();

        Runnable addWord = () -> {
            if (currentWord.length() > 0) {
                result.add(currentWord.toString());
                currentWord.setLength(0);
            }
        };

        for (int i = 0; i < name.length(); i++) {
            if (Character.isLetter(name.charAt(i)) || Character.isDigit(name.charAt(i))) {
                currentWord.append(name.charAt(i));
            }
            else {
                addWord.run();
            }
        }
        addWord.run();

        return result;
    }

    // Returns the naming config for the given API topic type, or null for INTERNAL / unknown types.
    private TopicNamingConfig getTopicNamingConfig(TopicType topicType) {
        switch (topicType) {
        case EVENTS:
            return config.getEvents();
        case DATA:
            return config.getData();
        case COMMANDS:
            return config.getCommands();
        default:
            return null;
        }
    }
}
9,050
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SecurityConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/SecurityConfig.java
package com.hermesworld.ais.galapagos.security;

import com.hermesworld.ais.galapagos.security.config.GalapagosSecurityProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.convert.converter.Converter;
import org.springframework.lang.NonNull;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.method.configuration.EnableMethodSecurity;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configurers.oauth2.server.resource.OAuth2ResourceServerConfigurer;
import org.springframework.security.config.http.SessionCreationPolicy;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.SimpleGrantedAuthority;
import org.springframework.security.core.session.SessionRegistryImpl;
import org.springframework.security.oauth2.jwt.Jwt;
import org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationConverter;
import org.springframework.security.oauth2.server.resource.authentication.JwtGrantedAuthoritiesConverter;
import org.springframework.security.web.SecurityFilterChain;
import org.springframework.security.web.authentication.session.RegisterSessionAuthenticationStrategy;
import org.springframework.security.web.authentication.session.SessionAuthenticationStrategy;

import java.util.Collection;
import java.util.Locale;
import java.util.stream.Collectors;

/**
 * Spring Security configuration: secures the REST API as a stateless OAuth2 resource server and maps JWT claims to
 * user name and roles, as configured via {@link GalapagosSecurityProperties}.
 */
@Configuration
@EnableMethodSecurity(securedEnabled = true, prePostEnabled = false)
public class SecurityConfig {

    @Bean
    SecurityFilterChain filterChain(HttpSecurity http, GalapagosSecurityProperties config) throws Exception {
        // stateless JWT-based API; CSRF protection is not applicable without server-side sessions
        http.csrf(csrf -> csrf.disable());
        http.sessionManagement(session -> session.sessionCreationPolicy(SessionCreationPolicy.STATELESS));
        // all API endpoints require the USER role; everything else (static frontend, config endpoint) is public
        http.authorizeHttpRequests(reg -> reg.requestMatchers("/api/**").hasRole("USER").anyRequest().permitAll());
        http.oauth2ResourceServer(conf -> conf.jwt(jwtCustomizer(config)));
        return http.build();
    }

    @Bean
    public SessionAuthenticationStrategy sessionAuthenticationStrategy() {
        return new RegisterSessionAuthenticationStrategy(new SessionRegistryImpl());
    }

    // Creates the JWT customizer which installs the claim-to-authentication converter.
    private Customizer<OAuth2ResourceServerConfigurer<HttpSecurity>.JwtConfigurer> jwtCustomizer(
            GalapagosSecurityProperties config) {
        return jwtConfigurer -> jwtConfigurer.jwtAuthenticationConverter(jwtAuthenticationConverter(config));
    }

    // Builds the converter mapping the configured JWT claims to the principal name and ROLE_* authorities.
    private JwtAuthenticationConverter jwtAuthenticationConverter(GalapagosSecurityProperties config) {
        JwtAuthenticationConverter converter = new JwtAuthenticationConverter();
        converter.setPrincipalClaimName(config.getJwtUserNameClaim());
        JwtGrantedAuthoritiesConverter authoritiesConverter = new JwtGrantedAuthoritiesConverter();
        authoritiesConverter.setAuthoritiesClaimName(config.getJwtRoleClaim());
        authoritiesConverter.setAuthorityPrefix("ROLE_");
        // role names from the token may be lowercase; upper-case them so hasRole("USER") etc. match
        converter.setJwtGrantedAuthoritiesConverter(new UpperCaseJwtGrantedAuthoritiesConverter(authoritiesConverter));
        return converter;
    }

    /** Decorator which upper-cases all authorities produced by the delegate converter. */
    private record UpperCaseJwtGrantedAuthoritiesConverter(JwtGrantedAuthoritiesConverter delegate)
            implements Converter<Jwt, Collection<GrantedAuthority>> {

        @Override
        public Collection<GrantedAuthority> convert(@NonNull Jwt source) {
            return mapToUpperCase(delegate.convert(source));
        }

        private Collection<GrantedAuthority> mapToUpperCase(Collection<GrantedAuthority> authorities) {
            // Locale.US keeps the upper-casing independent of the server's default locale
            return authorities.stream().map(a -> new SimpleGrantedAuthority(a.getAuthority().toUpperCase(Locale.US)))
                    .collect(Collectors.toSet());
        }
    }
}
3,920
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CurrentUserService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/CurrentUserService.java
package com.hermesworld.ais.galapagos.security;

import java.util.Optional;

/**
 * Service for accessing information about the user associated with the current security context.
 */
public interface CurrentUserService {

    /**
     * Returns the user name of the currently authenticated user, if any.
     *
     * @return The user name, or an empty Optional if no user is authenticated.
     */
    Optional<String> getCurrentUserName();

    /**
     * Returns the principal (user name and display name) of the currently authenticated user, e.g. for audit
     * purposes.
     *
     * @return The principal, or an empty Optional if no user is authenticated.
     */
    Optional<AuditPrincipal> getCurrentPrincipal();

    /**
     * Returns the e-mail address of the currently authenticated user, if available.
     *
     * @return The e-mail address, or an empty Optional if no user is authenticated or no address is available.
     */
    Optional<String> getCurrentUserEmailAddress();

    /**
     * Checks from the Security Context, if the current user has the role of an Administrator
     *
     * @return <code>true</code> if the current user has the role of an Administrator, <code>false</code> otherwise
     */
    boolean isAdmin();
}
524
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GalapagosAuditEventType.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/GalapagosAuditEventType.java
package com.hermesworld.ais.galapagos.security;

/**
 * Types of audit-relevant events provided by Galapagos.
 *
 * @author AlbrechtFlo
 *
 */
public enum GalapagosAuditEventType {

    APPLICATION_OWNER_REQUESTED,

    APPLICATION_OWNER_REQUEST_CANCELED,

    APPLICATION_OWNER_REQUEST_UPDATED,

    TOPIC_CREATED,

    TOPIC_DELETED,

    TOPIC_UPDATED,

    TOPIC_SUBSCRIBED,

    TOPIC_UNSUBSCRIBED,

    TOPIC_SCHEMA_ADDED,

    TOPIC_PRODUCER_APPLICATION_ADDED,

    TOPIC_PRODUCER_APPLICATION_REMOVED,

    TOPIC_OWNER_CHANGED,

    /**
     * @deprecated NOTE(review): no replacement is documented here — confirm why this event type is deprecated and
     *             which event (if any) supersedes it (presumably {@link #APPLICATION_REGISTERED}).
     */
    @Deprecated
    APPLICATION_CERTIFICATE_CREATED,

    APPLICATION_REGISTERED,

    SUBSCRIPTION_UPDATED
}
584
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
WebFrontendConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/WebFrontendConfig.java
package com.hermesworld.ais.galapagos.security; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.web.context.request.RequestContextListener; import org.springframework.web.servlet.config.annotation.EnableWebMvc; import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; @Configuration @EnableWebMvc public class WebFrontendConfig implements WebMvcConfigurer { @Override public void addResourceHandlers(ResourceHandlerRegistry registry) { registry.addResourceHandler("/app/**").addResourceLocations("classpath:/static/app/"); registry.addResourceHandler("/assets/**").addResourceLocations("classpath:/static/app/assets/"); } /** * This ensures that the current HTTP Request is accessible via ThreadLocal accessory, e.g. in * AuditEventRepositoryImpl. * * @return A new RequestContextListener bean. */ @Bean public RequestContextListener requestContextListener() { return new RequestContextListener(); } }
1,176
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AuditPrincipal.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/AuditPrincipal.java
package com.hermesworld.ais.galapagos.security;

/**
 * Immutable principal information (technical user name and human-readable display name) attached to audit log
 * entries.
 * <p>
 * Fix: fields are now {@code final} (the class is a value object and was never mutated), and the redundant manual
 * {@code getFullName()} / lombok {@code @Getter} duplication is resolved by writing both getters explicitly, keeping
 * the public API ({@code getName()}, {@code getFullName()}) unchanged.
 */
public final class AuditPrincipal {

    private final String name;

    private final String fullName;

    /**
     * @param name     technical / login user name
     * @param fullName human-readable display name of the user
     */
    public AuditPrincipal(String name, String fullName) {
        this.name = name;
        this.fullName = fullName;
    }

    public String getName() {
        return name;
    }

    public String getFullName() {
        return fullName;
    }
}
365
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GalapagosSecurityProperties.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/config/GalapagosSecurityProperties.java
package com.hermesworld.ais.galapagos.security.config;

import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.lang.NonNull;
import org.springframework.util.ObjectUtils;
import org.springframework.validation.Errors;
import org.springframework.validation.Validator;
import org.springframework.validation.annotation.Validated;

/**
 * Configuration properties to fine-control how verified JWT tokens are mapped to username and roles. See
 * application.properties for details.
 */
// NOTE(review): the @Slf4j logger appears unused in this class — consider removing the annotation.
@Configuration
@ConfigurationProperties("galapagos.security")
@Validated
@Getter
@Setter
@Slf4j
public class GalapagosSecurityProperties implements Validator {

    // JWT claim containing the user's roles
    // @NotEmpty
    private String jwtRoleClaim;

    // JWT claim containing the (technical) user name
    // @NotEmpty
    private String jwtUserNameClaim;

    // JWT claim containing the human-readable display name
    // @NotEmpty
    private String jwtDisplayNameClaim;

    // JWT claim containing the user's e-mail address
    // @NotEmpty
    private String jwtEmailClaim;

    @Override
    public boolean supports(@NonNull Class<?> clazz) {
        return GalapagosSecurityProperties.class.isAssignableFrom(clazz);
    }

    @Override
    public void validate(@NonNull Object target, @NonNull Errors errors) {
        // TODO remove, and replace by the NotEmpty annotations, once we do no longer want to provide this message.
        if (target instanceof GalapagosSecurityProperties properties) {
            // all four claim names must be configured; otherwise, point operators to the 2.8.0 migration guide
            if (ObjectUtils.isEmpty(properties.getJwtRoleClaim())
                    || ObjectUtils.isEmpty(properties.getJwtUserNameClaim())
                    || ObjectUtils.isEmpty(properties.getJwtDisplayNameClaim())
                    || ObjectUtils.isEmpty(properties.getJwtEmailClaim())) {
                errors.reject("MISSING_OAUTH2_PROPERTIES",
                        "Missing Galapagos OAuth2 properties. Maybe you did not perform required migration steps for Galapagos 2.8.0?\nPlease refer to https://github.com/HermesGermany/galapagos/blob/main/docs/Migration%20Guide%202.8.md");
            }
        }
    }
}
2,100
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
OAuthConfigController.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/impl/OAuthConfigController.java
package com.hermesworld.ais.galapagos.security.impl;

import com.hermesworld.ais.galapagos.security.config.GalapagosSecurityProperties;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
import org.springframework.http.MediaType;
import org.springframework.security.oauth2.client.registration.ClientRegistrations;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.Map;

/**
 * REST endpoint providing the OAuth2 client configuration (client ID, issuer, token endpoint, claim names) to the web
 * frontend, based on the Spring Security OAuth2 client registration of this server.
 */
@RestController
@Slf4j
public class OAuthConfigController {

    private final OAuth2ClientProperties oAuth2ClientProperties;

    private final GalapagosSecurityProperties galapagosSecurityProperties;

    public OAuthConfigController(OAuth2ClientProperties oAuth2ClientProperties,
            GalapagosSecurityProperties galapagosSecurityProperties) {
        this.oAuth2ClientProperties = oAuth2ClientProperties;
        this.galapagosSecurityProperties = galapagosSecurityProperties;
    }

    /**
     * Returns the OAuth2 configuration for the UI as a JSON object. Requires exactly one Spring Security OAuth2
     * client registration with a matching provider; otherwise, an empty object is returned (and an error is logged).
     */
    @GetMapping(value = "/oauth2/config.json", produces = MediaType.APPLICATION_JSON_VALUE)
    public Map<String, Object> getOauthConfig() {
        Map<String, OAuth2ClientProperties.Registration> registrations = oAuth2ClientProperties.getRegistration();
        if (registrations.isEmpty()) {
            log.error("No Spring Security OAuth2 client registrations found. Cannot provide OAuth2 config to UI.");
            return Map.of();
        }
        if (registrations.size() > 1) {
            log.error(
                    "More than one Spring Security OAuth2 client registration found. Cannot provide OAuth2 config to UI.");
            return Map.of();
        }

        var regKey = registrations.entrySet().iterator().next().getKey();
        var registration = registrations.get(regKey);
        var provider = oAuth2ClientProperties.getProvider().get(regKey);
        if (provider == null) {
            log.error("No Spring Security OAuth2 provider found with id \"{}\". Cannot provide OAuth2 config to UI.",
                    regKey);
            return Map.of();
        }

        // Use .well-known endpoint to retrieve correct token endpoint
        // NOTE(review): this performs an HTTP request to the issuer on every call — consider caching the result.
        var fullReg = ClientRegistrations.fromOidcIssuerLocation(provider.getIssuerUri())
                .clientId(registration.getClientId()).build();

        return Map.of("clientId", registration.getClientId(), "scope", registration.getScope(), "issuerUri",
                provider.getIssuerUri(), "tokenEndpoint", fullReg.getProviderDetails().getTokenUri(), "userNameClaim",
                galapagosSecurityProperties.getJwtUserNameClaim(), "displayNameClaim",
                galapagosSecurityProperties.getJwtDisplayNameClaim(), "rolesClaim",
                galapagosSecurityProperties.getJwtRoleClaim());
    }
}
2,826
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
DefaultCurrentUserService.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/impl/DefaultCurrentUserService.java
package com.hermesworld.ais.galapagos.security.impl;

import com.hermesworld.ais.galapagos.events.EventContextSource;
import com.hermesworld.ais.galapagos.security.AuditPrincipal;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.security.config.GalapagosSecurityProperties;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationToken;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

/**
 * Default implementation of {@link CurrentUserService}, reading user information from the Spring Security context.
 * The JWT claims to use for user name, display name, and e-mail address are configured via
 * {@link GalapagosSecurityProperties}. Also exposes the current user as event context values (see
 * {@link EventContextSource}).
 */
@Component
public class DefaultCurrentUserService implements CurrentUserService, EventContextSource {

    private final GalapagosSecurityProperties securityConfig;

    public DefaultCurrentUserService(GalapagosSecurityProperties securityConfig) {
        this.securityConfig = securityConfig;
    }

    @Override
    public Optional<String> getCurrentUserName() {
        SecurityContext context = SecurityContextHolder.getContext();
        if (context.getAuthentication() == null || context.getAuthentication().getPrincipal() == null) {
            return Optional.empty();
        }
        return Optional.of(context.getAuthentication().getName());
    }

    @Override
    public Optional<AuditPrincipal> getCurrentPrincipal() {
        // maps the configured user name and display name claims of the current JWT to an AuditPrincipal
        return getAuthenticationToken()
                .map(t -> new AuditPrincipal(t.getToken().getClaimAsString(securityConfig.getJwtUserNameClaim()),
                        t.getToken().getClaimAsString(securityConfig.getJwtDisplayNameClaim())));
    }

    @Override
    public Optional<String> getCurrentUserEmailAddress() {
        return getAuthenticationToken()
                .map(token -> token.getToken().getClaimAsString(securityConfig.getJwtEmailClaim()));
    }

    // Returns the JWT authentication of the current security context, if the current request is JWT-authenticated.
    private Optional<JwtAuthenticationToken> getAuthenticationToken() {
        SecurityContext context = SecurityContextHolder.getContext();
        if (context.getAuthentication() instanceof JwtAuthenticationToken token) {
            return Optional.of(token);
        }
        return Optional.empty();
    }

    @Override
    public Map<String, Object> getContextValues() {
        // values may be null if no user is authenticated (e.g. for system-triggered events)
        Map<String, Object> result = new HashMap<>();
        result.put("username", getCurrentUserName().orElse(null));
        result.put("email", getCurrentUserEmailAddress().orElse(null));
        result.put("principal", getCurrentPrincipal().orElse(null));
        return result;
    }

    @Override
    public boolean isAdmin() {
        SecurityContext context = SecurityContextHolder.getContext();
        if (context.getAuthentication() == null || context.getAuthentication().getAuthorities() == null) {
            return false;
        }
        // admin role is expected to be mapped to the ROLE_ADMIN authority (see SecurityConfig)
        return context.getAuthentication().getAuthorities().stream()
                .anyMatch(auth -> auth.getAuthority().equals("ROLE_ADMIN"));
    }
}
2,994
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AuditEventRepositoryImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/impl/AuditEventRepositoryImpl.java
package com.hermesworld.ais.galapagos.security.impl; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.module.SimpleModule; import com.hermesworld.ais.galapagos.util.JsonUtil; import jakarta.servlet.http.HttpServletRequest; import lombok.extern.slf4j.Slf4j; import org.springframework.boot.actuate.audit.AuditEvent; import org.springframework.boot.actuate.audit.AuditEventRepository; import org.springframework.boot.actuate.audit.InMemoryAuditEventRepository; import org.springframework.stereotype.Component; import org.springframework.web.context.request.RequestContextHolder; import org.springframework.web.context.request.ServletRequestAttributes; import java.util.HashMap; import java.util.Map; /** * This implementation of the {@link AuditEventRepository} interface just extends the in-memory implementation of Spring * Boot Actuator and logs each added AuditEvent to the special logger <code>galapagos.audit</code>. The AuditEvent is * wrapped in an info structure also containing information about the current HTTP request, if any. The info structure * is logged in JSON format. 
* * @author AlbrechtFlo * */ @Component @Slf4j public class AuditEventRepositoryImpl extends InMemoryAuditEventRepository { private final ObjectMapper objectMapper; public AuditEventRepositoryImpl() { this.objectMapper = JsonUtil.newObjectMapper(); this.objectMapper.setConfig( this.objectMapper.getSerializationConfig().without(SerializationFeature.FAIL_ON_EMPTY_BEANS)); SimpleModule mapperModule = new SimpleModule(); this.objectMapper.registerModule(mapperModule); } @Override public void add(AuditEvent event) { super.add(event); HttpServletRequest request = null; try { if (RequestContextHolder.currentRequestAttributes() instanceof ServletRequestAttributes) { request = ((ServletRequestAttributes) RequestContextHolder.currentRequestAttributes()).getRequest(); } } catch (IllegalStateException e) { // OK, no request log.trace("No current request found for Audit log, not including request information"); } Map<String, Object> logEntry = new HashMap<>(); if (request != null) { logEntry.put("request", toLogMap(request)); } logEntry.put("event", event); try { log.info(this.objectMapper.writeValueAsString(logEntry)); } catch (JsonProcessingException e) { // critical, as Audit log entry missing throw new RuntimeException("Could not serialize Audit log entry", e); } } private static Map<String, String> toLogMap(HttpServletRequest request) { Map<String, String> result = new HashMap<>(); result.put("url", request.getRequestURL().toString()); result.put("method", request.getMethod()); result.put("path", request.getPathInfo()); result.put("clientIp", request.getRemoteAddr()); return result; } }
3,223
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AuditEventsListener.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/impl/AuditEventsListener.java
package com.hermesworld.ais.galapagos.security.impl;

import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.security.GalapagosAuditEventType;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.springframework.boot.actuate.audit.AuditEvent;
import org.springframework.boot.actuate.audit.AuditEventRepository;
import org.springframework.stereotype.Component;

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

/**
 * Galapagos event listener which translates topic, subscription, and application events into Spring Boot Actuator
 * {@link AuditEvent}s and stores them in the configured {@link AuditEventRepository}. All handlers are synchronous and
 * return an already-completed future.
 */
@Component
public class AuditEventsListener implements TopicEventsListener, SubscriptionEventsListener, ApplicationEventsListener {

    private final AuditEventRepository auditRepository;

    private static final String APPLICATION_ID = "applicationId";

    private static final String ENVIRONMENT_ID = "environmentId";

    private static final String NAME = "name";

    public AuditEventsListener(AuditEventRepository auditRepository) {
        this.auditRepository = auditRepository;
    }

    @Override
    public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) {
        TopicMetadata topic = event.getMetadata();
        TopicCreateParams params = event.getTopicCreateParams();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(NAME, topic.getName());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        auditData.put("description", topic.getDescription());
        auditData.put("topicType", topic.getType());
        auditData.put(APPLICATION_ID, topic.getOwnerApplicationId());
        auditData.put("subscriptionApprovalRequired", topic.isSubscriptionApprovalRequired());
        auditData.put("numPartitions", params.getNumberOfPartitions());
        auditData.put("topicConfig", params.getTopicConfigs());
        auditRepository
                .add(new AuditEvent(getUserName(event), GalapagosAuditEventType.TOPIC_CREATED.name(), auditData));
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) {
        TopicMetadata topic = event.getMetadata();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(NAME, topic.getName());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        auditRepository
                .add(new AuditEvent(getUserName(event), GalapagosAuditEventType.TOPIC_DELETED.name(), auditData));
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) {
        return handleTopicEvent(event, "TOPIC_DEPRECATED");
    }

    @Override
    public CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event) {
        return handleTopicEvent(event, "TOPIC_DESCRIPTION_CHANGED");
    }

    @Override
    public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) {
        return handleTopicEvent(event, "TOPIC_UNDEPRECATED");
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) {
        TopicMetadata topic = event.getMetadata();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(NAME, topic.getName());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        auditData.put("schemaVersion", event.getNewSchema().getSchemaVersion());
        auditData.put("jsonSchema", event.getNewSchema().getJsonSchema());
        auditRepository
                .add(new AuditEvent(getUserName(event), GalapagosAuditEventType.TOPIC_SCHEMA_ADDED.name(), auditData));
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) {
        TopicMetadata topic = event.getMetadata();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(NAME, topic.getName());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        // FIXED: this previously logged TOPIC_SCHEMA_ADDED for schema deletions (copy-paste bug).
        // If GalapagosAuditEventType does not yet define TOPIC_SCHEMA_DELETED, it must be added there.
        auditRepository.add(
                new AuditEvent(getUserName(event), GalapagosAuditEventType.TOPIC_SCHEMA_DELETED.name(), auditData));
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) {
        return handleTopicEvent(event, "TOPIC_SUBSCRIPTION_APPROVAL_REQUIRED_FLAG_CHANGED");
    }

    @Override
    public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) {
        return handleTopicEvent(event, GalapagosAuditEventType.TOPIC_PRODUCER_APPLICATION_ADDED.name());
    }

    @Override
    public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) {
        return handleTopicEvent(event, GalapagosAuditEventType.TOPIC_PRODUCER_APPLICATION_REMOVED.name());
    }

    @Override
    public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) {
        return handleTopicEvent(event, GalapagosAuditEventType.TOPIC_OWNER_CHANGED.name());
    }

    @Override
    public CompletableFuture<Void> handleApplicationRegistered(ApplicationEvent event) {
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(APPLICATION_ID, event.getMetadata().getApplicationId());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        auditData.put("authentication", event.getMetadata().getAuthenticationJson());
        auditRepository.add(
                new AuditEvent(getUserName(event), GalapagosAuditEventType.APPLICATION_REGISTERED.name(), auditData));
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationAuthenticationChangeEvent event) {
        // NOTE(review): delegates to handleApplicationRegistered, so authentication changes are audited with the
        // APPLICATION_REGISTERED event type. Confirm this is intended.
        return handleApplicationRegistered(event);
    }

    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequestEvent event) {
        return handleApplicationOwnerRequest(event, GalapagosAuditEventType.APPLICATION_OWNER_REQUESTED, false);
    }

    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequestEvent event) {
        return handleApplicationOwnerRequest(event, GalapagosAuditEventType.APPLICATION_OWNER_REQUEST_UPDATED, true);
    }

    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequestEvent event) {
        return handleApplicationOwnerRequest(event, GalapagosAuditEventType.APPLICATION_OWNER_REQUEST_CANCELED, false);
    }

    @Override
    public CompletableFuture<Void> handleSubscriptionCreated(SubscriptionEvent event) {
        return handleSubscriptionEvent(event, GalapagosAuditEventType.TOPIC_SUBSCRIBED);
    }

    @Override
    public CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionEvent event) {
        return handleSubscriptionEvent(event, GalapagosAuditEventType.TOPIC_UNSUBSCRIBED);
    }

    @Override
    public CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionEvent event) {
        return handleSubscriptionEvent(event, GalapagosAuditEventType.SUBSCRIPTION_UPDATED);
    }

    /**
     * Builds and stores an audit event for an application owner request.
     *
     * @param includeStatus if <code>true</code>, the request's current state is included as "newStatus".
     */
    private CompletableFuture<Void> handleApplicationOwnerRequest(ApplicationOwnerRequestEvent event,
            GalapagosAuditEventType type, boolean includeStatus) {
        ApplicationOwnerRequest request = event.getRequest();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(APPLICATION_ID, request.getApplicationId());
        auditData.put("requestingUser", request.getUserName());
        if (includeStatus) {
            auditData.put("newStatus", request.getState());
        }
        auditRepository.add(new AuditEvent(getUserName(event), type.name(), auditData));
        return FutureUtil.noop();
    }

    /**
     * Builds and stores a generic TOPIC_UPDATED audit event; the concrete cause is carried in the "topicEventType"
     * audit data field.
     */
    private CompletableFuture<Void> handleTopicEvent(TopicEvent event, String topicEventType) {
        TopicMetadata topic = event.getMetadata();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(NAME, topic.getName());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        auditData.put("description", topic.getDescription());
        auditData.put("topicType", topic.getType());
        auditData.put(APPLICATION_ID, topic.getOwnerApplicationId());
        auditData.put("subscriptionApprovalRequired", topic.isSubscriptionApprovalRequired());
        auditData.put("deprecated", topic.isDeprecated());
        auditData.put("deprecationText", topic.getDeprecationText());
        if (topic.getEolDate() != null) {
            auditData.put("endOfLife", topic.getEolDate().toString());
        }
        if (topic.getProducers() != null) {
            auditData.put("producers", topic.getProducers());
        }
        auditData.put("topicEventType", topicEventType);
        auditRepository
                .add(new AuditEvent(getUserName(event), GalapagosAuditEventType.TOPIC_UPDATED.name(), auditData));
        return FutureUtil.noop();
    }

    /**
     * Builds and stores an audit event for a subscription change.
     */
    private CompletableFuture<Void> handleSubscriptionEvent(SubscriptionEvent event,
            GalapagosAuditEventType eventType) {
        SubscriptionMetadata metadata = event.getMetadata();
        Map<String, Object> auditData = new LinkedHashMap<>();
        auditData.put(NAME, metadata.getTopicName());
        auditData.put(ENVIRONMENT_ID, event.getContext().getKafkaCluster().getId());
        auditData.put("description", metadata.getDescription());
        auditData.put(APPLICATION_ID, metadata.getClientApplicationId());
        auditData.put("state", metadata.getState() == null ? null : metadata.getState().toString());
        auditRepository.add(new AuditEvent(getUserName(event), eventType.name(), auditData));
        return FutureUtil.noop();
    }

    // Resolves the acting user from the event context; null if no username is bound.
    private String getUserName(AbstractGalapagosEvent event) {
        return event.getContext().getContextValue("username").map(Object::toString).orElse(null);
    }
}
10,351
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
KafkaExecutorFactoryImpl.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/security/impl/KafkaExecutorFactoryImpl.java
package com.hermesworld.ais.galapagos.security.impl; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import org.springframework.security.concurrent.DelegatingSecurityContextExecutorService; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.stereotype.Component; import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory; /** * Implementation of the {@link KafkaExecutorFactory} interface which returns a * {@link DelegatingSecurityContextExecutorService} based on a simple single-threaded executor. This allows Galapagos * services like <code>TopicService</code> to access the Security Context (via <code>CurrentUserService</code>) and * determine the current user even in sub-Threds. * * @author AlbrechtFlo * */ @Component public class KafkaExecutorFactoryImpl implements KafkaExecutorFactory { @Override public ExecutorService newExecutor() { return new DelegatingSecurityContextExecutorService(Executors.newSingleThreadExecutor(), SecurityContextHolder.getContext()); } }
1,117
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationCnDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/ApplicationCnDto.java
package com.hermesworld.ais.galapagos.uisupport.controller; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import lombok.Getter; import lombok.Setter; @Getter @Setter @JsonSerialize public class ApplicationCnDto { private String applicationId; private String name; private String cn; public ApplicationCnDto(String applicationId, String name, String cn) { this.applicationId = applicationId; this.name = name; this.cn = cn; } }
496
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
UISupportController.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/UISupportController.java
package com.hermesworld.ais.galapagos.uisupport.controller;

import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.BusinessCapability;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthenticationModule;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationModule;
import com.hermesworld.ais.galapagos.changes.config.GalapagosChangesConfig;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.util.KafkaTopicConfigHelper;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.CertificateUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.config.TopicConfig;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.util.StreamUtils;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.server.ResponseStatusException;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.time.Period;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;

/**
 * Crossfunctional controller for calculating sensible defaults and other useful values mainly for use in UIs. <br>
 * As such, this controller contains some "business logic", e.g. about the combination effects of some Kafka
 * configuration properties.
 *
 * @author AlbrechtFlo
 *
 */
@RestController
@Slf4j
public class UISupportController {

    private final ApplicationsService applicationsService;

    private final TopicService topicService;

    // now final, consistent with the other injected dependencies
    private final KafkaClusters kafkaClusters;

    private final NamingService namingService;

    private final GalapagosTopicConfig topicConfig;

    private final GalapagosChangesConfig changesConfig;

    private final CustomLinksConfig customLinksConfig;

    private static final Supplier<ResponseStatusException> badRequest = () -> new ResponseStatusException(
            HttpStatus.BAD_REQUEST);

    private static final Supplier<ResponseStatusException> notFound = () -> new ResponseStatusException(
            HttpStatus.NOT_FOUND);

    // Clamp bounds for the suggested segment.bytes value. These must be long: the previous int-typed
    // TEN_GIGABYTES ((int) Math.pow(2, 30) * 10) overflowed the int range and was in fact negative,
    // so Math.min(segmentBytes, TEN_GIGABYTES) always produced a negative segment size.
    private static final long TEN_MEGABYTES = 10L * 1024 * 1024;

    private static final long TEN_GIGABYTES = 10L * 1024 * 1024 * 1024;

    private static final long ONE_HOUR = TimeUnit.HOURS.toMillis(1);

    private static final long ONE_WEEK = TimeUnit.DAYS.toMillis(7);

    public UISupportController(ApplicationsService applicationsService, TopicService topicService,
            KafkaClusters kafkaClusters, NamingService namingService, GalapagosTopicConfig topicConfig,
            CustomLinksConfig customLinksConfig, GalapagosChangesConfig changesConfig) {
        this.applicationsService = applicationsService;
        this.topicService = topicService;
        this.kafkaClusters = kafkaClusters;
        this.namingService = namingService;
        this.topicConfig = topicConfig;
        this.customLinksConfig = customLinksConfig;
        this.changesConfig = changesConfig;
    }

    /**
     * Returns all configuration elements of the backend which are also relevant for the frontend. Also includes the
     * custom links to be shown on the dashboard.
     *
     * @return All configuration elements of the backend which are also relevant for the frontend.
     */
    @GetMapping(value = "/api/util/uiconfig", produces = MediaType.APPLICATION_JSON_VALUE)
    public UiConfigDto getUiConfig() {
        UiConfigDto result = new UiConfigDto();
        result.setMinDeprecationTime(toPeriodDto(topicConfig.getMinDeprecationTime()));
        result.setCustomLinks(customLinksConfig.getLinks());
        result.setChangelogEntries(changesConfig.getEntries());
        result.setChangelogMinDays(changesConfig.getMinDays());
        result.setDefaultPicture(changesConfig.getDefaultPicture());
        result.setProfilePicture(changesConfig.getProfilePicture());
        result.setCustomImageUrl(changesConfig.getCustomImageUrl());
        return result;
    }

    /**
     * Returns the custom links configured for the dashboard.
     */
    @GetMapping(value = "/api/util/customlinks", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<CustomLinkConfig> getCustomLinks() {
        return customLinksConfig.getLinks();
    }

    /**
     * Returns a Kafka client configuration template for the given framework ("spring" or "micronaut"), filled with the
     * bootstrap servers of the given environment.
     *
     * @throws ResponseStatusException with status 404 if the framework is unknown.
     */
    @GetMapping(value = "/api/util/framework-config/{environmentId}/{framework}", produces = MediaType.TEXT_PLAIN_VALUE)
    public String getFrameworkConfigTemplate(@PathVariable String environmentId, @PathVariable String framework) {
        switch (framework) {
        case "spring":
            return springConfigBuilder.apply(environmentId);
        case "micronaut":
            return micronautConfigBuilder.apply(environmentId);
        }
        throw notFound.get();
    }

    /**
     * Calculates sensible defaults for creating a topic, based on the query parameters (expected message count / size,
     * desired retention time). Suggests a topic name when application, environment, and topic type are given.
     */
    @PostMapping(value = "/api/util/topic-create-defaults", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
    public TopicCreateDefaultsDto getDefaultTopicCreateParams(@RequestBody QueryTopicCreateDefaultsDto query) {
        TopicCreateDefaultsDto result = new TopicCreateDefaultsDto();
        if (StringUtils.hasLength(query.getApplicationId()) && StringUtils.hasLength(query.getEnvironmentId())
                && query.getTopicType() != null) {
            result.setTopicNameSuggestion(getTopicNameSuggestion(query));
        }
        result.setDefaultPartitionCount(topicConfig.getDefaultPartitionCount());
        Map<String, String> defaultConfigs = new HashMap<>();
        defaultConfigs.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);
        defaultConfigs.put(TopicConfig.RETENTION_MS_CONFIG, String.valueOf(TimeUnit.DAYS.toMillis(7)));

        if (query.getExpectedMessageCountPerDay() != null && query.getExpectedAvgMessageSizeBytes() != null) {
            int cnt = query.getExpectedMessageCountPerDay();
            long bytesPerDay = cnt * query.getExpectedAvgMessageSizeBytes();
            // suggest a segment size between 10 MB and 10 GB, roughly one segment per day
            long segmentBytes = Math.max(TEN_MEGABYTES, bytesPerDay);
            segmentBytes = Math.min(segmentBytes, TEN_GIGABYTES);
            defaultConfigs.put(TopicConfig.SEGMENT_BYTES_CONFIG, String.valueOf(segmentBytes));
        }

        // we set segment.ms to max one week to avoid unexpected effects when e.g. testing retention time settings.
        // If retention is lower, segment.ms will be set to retention time.
        if (query.getRetentionTimeMs() != null) {
            long segmentMs = Math.max(query.getRetentionTimeMs(), ONE_HOUR);
            segmentMs = Math.min(segmentMs, ONE_WEEK);
            defaultConfigs.put(TopicConfig.SEGMENT_MS_CONFIG, String.valueOf(segmentMs));
        }

        result.setDefaultTopicConfigs(defaultConfigs);
        return result;
    }

    /**
     * Returns the default topic configuration of the given Kafka environment.
     */
    @GetMapping(value = "/api/util/default-topic-config/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public Map<String, String> getDefaultTopicConfig(@PathVariable String environmentId) {
        KafkaCluster env = kafkaClusters.getEnvironment(environmentId).orElseThrow(notFound);
        try {
            return env.getDefaultTopicConfig().get();
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Collections.emptyMap();
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not query default topic configuration: ");
        }
    }

    /**
     * Returns all Kafka topic config keys known to Galapagos, together with their descriptions.
     */
    @GetMapping(value = "/api/util/supported-kafka-configs", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<KafkaConfigDescriptionDto> getSupportedKafkaConfigs() {
        return KafkaTopicConfigHelper.getConfigKeysAndDescription().entrySet().stream()
                .map(entry -> new KafkaConfigDescriptionDto(entry.getKey(), entry.getValue()))
                .collect(Collectors.toList());
    }

    /**
     * Returns the IDs of all environments on which the given topic exists.
     */
    @GetMapping(value = "/api/util/environments-for-topic/{topicName:.+}", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<String> getEnvironmentsWithTopic(@PathVariable String topicName) {
        return kafkaClusters.getEnvironmentIds().stream()
                .map(envId -> topicService.getTopic(envId, topicName).map(t -> envId).orElse(null))
                .filter(Objects::nonNull).collect(Collectors.toList());
    }

    /**
     * Returns the IDs of all environments supporting developer certificates.
     */
    @GetMapping(value = "/api/util/supported-devcert-environments", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<String> getEnvironmentsWithDeveloperCertificateSupport() {
        return kafkaClusters.getEnvironmentIds().stream()
                .map(id -> supportsEnvironmentDeveloperCertificate(id) ? id : null).filter(Objects::nonNull)
                .collect(Collectors.toList());
    }

    /**
     * Returns the IDs of all environments supporting developer API keys.
     */
    @GetMapping(value = "/api/util/supported-apikey-environments", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<String> getEnvironmentsWithDeveloperApikeySupport() {
        return kafkaClusters.getEnvironmentIds().stream().map(id -> supportsEnvironmentDeveloperApiKey(id) ? id : null)
                .filter(Objects::nonNull).collect(Collectors.toList());
    }

    private boolean supportsEnvironmentDeveloperCertificate(String environmentId) {
        return getCertificatesAuthenticationModuleForEnv(environmentId)
                .map(CertificatesAuthenticationModule::supportsDeveloperCertificates).orElse(false);
    }

    // returns the environment's auth module, if (and only if) it is certificates-based
    private Optional<CertificatesAuthenticationModule> getCertificatesAuthenticationModuleForEnv(
            String environmentId) {
        return kafkaClusters.getAuthenticationModule(environmentId)
                .filter(CertificatesAuthenticationModule.class::isInstance)
                .map(CertificatesAuthenticationModule.class::cast);
    }

    private boolean supportsEnvironmentDeveloperApiKey(String environmentId) {
        return getConfluentAuthenticationModuleForEnv(environmentId)
                .map(ConfluentCloudAuthenticationModule::supportsDeveloperApiKeys).orElse(false);
    }

    // returns the environment's auth module, if (and only if) it is Confluent-Cloud-based
    private Optional<ConfluentCloudAuthenticationModule> getConfluentAuthenticationModuleForEnv(String environmentId) {
        return kafkaClusters.getAuthenticationModule(environmentId)
                .filter(ConfluentCloudAuthenticationModule.class::isInstance)
                .map(ConfluentCloudAuthenticationModule.class::cast);
    }

    /**
     * Returns the common name (CN) derived for the given application.
     */
    @GetMapping(value = "/api/util/common-name/{applicationId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public ApplicationCnDto getApplicationCommonName(@PathVariable String applicationId) {
        return applicationsService.getKnownApplication(applicationId)
                .map(app -> new ApplicationCnDto(app.getId(), app.getName(), CertificateUtil.toAppCn(app.getName())))
                .orElseThrow(notFound);
    }

    private String getTopicNameSuggestion(QueryTopicCreateDefaultsDto query) {
        KnownApplication app = applicationsService.getKnownApplication(query.getApplicationId())
                .orElseThrow(badRequest);
        BusinessCapability cap = app.getBusinessCapabilities().stream()
                .filter(bc -> bc.getId().equals(query.getBusinessCapabilityId())).findFirst().orElse(null);

        String name = namingService.getTopicNameSuggestion(query.getTopicType(), app, cap);
        if (name == null) {
            throw badRequest.get();
        }
        return name;
    }

    // maps exceptions from async Kafka operations to suitable HTTP status codes
    private ResponseStatusException handleExecutionException(ExecutionException e, String msgPrefix) {
        Throwable t = e.getCause();
        if (t instanceof CertificateException) {
            return new ResponseStatusException(HttpStatus.BAD_REQUEST, msgPrefix + t.getMessage());
        }
        if (t instanceof NoSuchElementException) {
            return new ResponseStatusException(HttpStatus.NOT_FOUND);
        }
        if ((t instanceof IllegalStateException) || (t instanceof IllegalArgumentException)) {
            return new ResponseStatusException(HttpStatus.BAD_REQUEST, t.getMessage());
        }
        log.error("Unhandled exception in UISupportController", t);
        return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR);
    }

    // Reads the config template resource; throws IOException (instead of an NPE, as previously)
    // if the template resource does not exist.
    private static String readConfigTemplate(String framework, String authenticationType) throws IOException {
        String resourceName = "configtemplates/" + framework + "." + authenticationType + ".template.yaml";
        try (InputStream in = UISupportController.class.getClassLoader().getResourceAsStream(resourceName)) {
            if (in == null) {
                throw new IOException("Config template resource not found: " + resourceName);
            }
            return StreamUtils.copyToString(in, StandardCharsets.UTF_8);
        }
    }

    private static PeriodDto toPeriodDto(Period period) {
        return new PeriodDto(period.getYears(), period.getMonths(), period.getDays());
    }

    // builds a Spring Kafka config by replacing the bootstrap servers placeholder in the template
    private final Function<String, String> springConfigBuilder = (environmentId) -> {
        KafkaEnvironmentConfig config = kafkaClusters.getEnvironmentMetadata(environmentId).orElse(null);
        if (config == null) {
            return null;
        }
        try {
            return readConfigTemplate("spring", config.getAuthenticationMode()).replace("${bootstrap.servers}",
                    config.getBootstrapServers());
        }
        catch (IOException e) {
            log.error("Could not read Spring config template", e);
            return null;
        }
    };

    // builds a Micronaut config; lines containing the server placeholder are repeated per bootstrap server
    private final Function<String, String> micronautConfigBuilder = (environmentId) -> {
        KafkaEnvironmentConfig config = kafkaClusters.getEnvironmentMetadata(environmentId).orElse(null);
        if (config == null) {
            return null;
        }
        String[] bootstrapServers = config.getBootstrapServers().split(",");
        try {
            String configFile = readConfigTemplate("micronaut", config.getAuthenticationMode());
            StringBuilder sbOut = new StringBuilder();
            String[] lines = configFile.split("\\r?\\n");
            for (String line : lines) {
                if (line.contains("${bootstrap.server}")) {
                    for (String server : bootstrapServers) {
                        sbOut.append(line.replace("${bootstrap.server}", server.trim())).append('\n');
                    }
                }
                else {
                    sbOut.append(line).append('\n');
                }
            }
            return sbOut.toString();
        }
        catch (IOException e) {
            log.error("Could not read Micronaut config template", e);
            return null;
        }
    };
}
15,532
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CustomLinkConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/CustomLinkConfig.java
package com.hermesworld.ais.galapagos.uisupport.controller; import lombok.Getter; import lombok.Setter; @Getter @Setter public class CustomLinkConfig { private String id; private String href; private String label; private LinkType linkType; }
266
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
CustomLinksConfig.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/CustomLinksConfig.java
package com.hermesworld.ais.galapagos.uisupport.controller; import java.util.ArrayList; import java.util.List; import lombok.Getter; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Configuration; @ConfigurationProperties(prefix = "galapagos.custom-links") @Configuration public class CustomLinksConfig { @Getter private List<CustomLinkConfig> links = new ArrayList<>(); public void setLinks(List<CustomLinkConfig> links) { checkElements(links); this.links = links; } private void checkElements(List<CustomLinkConfig> links) throws RuntimeException { for (CustomLinkConfig customLinkConfig : links) { if (customLinkConfig.getId() == null || customLinkConfig.getHref() == null || customLinkConfig.getLabel() == null || customLinkConfig.getLinkType() == null) { throw new RuntimeException( "A field of a custom link must not be empty. Please check application.properties."); } } } }
1,103
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
LinkType.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/LinkType.java
package com.hermesworld.ais.galapagos.uisupport.controller; public enum LinkType { EDUCATIONAL, SOURCECODE, OTHER }
121
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicCreateDefaultsDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/TopicCreateDefaultsDto.java
package com.hermesworld.ais.galapagos.uisupport.controller; import java.util.Map; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import lombok.Getter; import lombok.Setter; @JsonSerialize @Getter @Setter public class TopicCreateDefaultsDto { private int defaultPartitionCount; private Map<String, String> defaultTopicConfigs; private String topicNameSuggestion; }
400
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
PeriodDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/PeriodDto.java
package com.hermesworld.ais.galapagos.uisupport.controller; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import lombok.Getter; import lombok.Setter; @JsonSerialize @Getter @Setter public class PeriodDto { private int years; private int months; private int days; public PeriodDto(int years, int months, int days) { this.years = years; this.months = months; this.days = days; } public PeriodDto() { } }
479
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
UiConfigDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/UiConfigDto.java
package com.hermesworld.ais.galapagos.uisupport.controller; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.hermesworld.ais.galapagos.changes.config.ProfilePicture; import lombok.Getter; import lombok.Setter; import java.util.List; @JsonSerialize @Getter @Setter public class UiConfigDto { private PeriodDto minDeprecationTime; private List<CustomLinkConfig> customLinks; private int changelogEntries; private int changelogMinDays; private ProfilePicture profilePicture; private ProfilePicture defaultPicture; private String customImageUrl; }
609
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
KafkaConfigDescriptionDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/KafkaConfigDescriptionDto.java
package com.hermesworld.ais.galapagos.uisupport.controller; import lombok.Getter; @Getter public class KafkaConfigDescriptionDto { private String configName; private String configDescription; public KafkaConfigDescriptionDto(String configName, String configDescription) { this.configName = configName; this.configDescription = configDescription; } }
388
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
QueryTopicCreateDefaultsDto.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/uisupport/controller/QueryTopicCreateDefaultsDto.java
package com.hermesworld.ais.galapagos.uisupport.controller; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.hermesworld.ais.galapagos.topics.TopicType; import lombok.Getter; import lombok.Setter; @JsonSerialize @Getter @Setter public class QueryTopicCreateDefaultsDto { private TopicType topicType; private String applicationId; private String environmentId; private String businessCapabilityId; private Integer expectedMessageCountPerDay; private Long expectedAvgMessageSizeBytes; private Long retentionTimeMs; }
579
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicSchemaAddedEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicSchemaAddedEvent.java
package com.hermesworld.ais.galapagos.events; import com.hermesworld.ais.galapagos.topics.SchemaMetadata; import com.hermesworld.ais.galapagos.topics.TopicMetadata; import lombok.Getter; @Getter public class TopicSchemaAddedEvent extends TopicEvent { private SchemaMetadata newSchema; public TopicSchemaAddedEvent(GalapagosEventContext context, TopicMetadata metadata, SchemaMetadata newSchema) { super(context, metadata); this.newSchema = newSchema; } }
488
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicEventsListener.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicEventsListener.java
package com.hermesworld.ais.galapagos.events;

import java.util.concurrent.CompletableFuture;

/**
 * Listener interface for all topic-related Galapagos events. Implementations
 * return a {@link CompletableFuture} so event handling can run asynchronously;
 * the future completes when the handler has finished its work.
 */
public interface TopicEventsListener {

    CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event);

    CompletableFuture<Void> handleTopicDeleted(TopicEvent event);

    CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event);

    CompletableFuture<Void> handleTopicDeprecated(TopicEvent event);

    CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event);

    CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event);

    CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event);

    CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event);

    CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event);

    CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event);

    CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event);
}
1,018
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/ApplicationEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import lombok.Getter;

/**
 * Base event for application-related Galapagos events, carrying the metadata
 * of the affected application. Field made final for immutability, consistent
 * with the other event classes in this package.
 */
@Getter
public class ApplicationEvent extends AbstractGalapagosEvent {

    /** Metadata of the application this event refers to. */
    private final ApplicationMetadata metadata;

    public ApplicationEvent(GalapagosEventContext context, ApplicationMetadata metadata) {
        super(context);
        this.metadata = metadata;
    }
}
414
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicOwnerChangeEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicOwnerChangeEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.Getter;

/**
 * Event fired when the owning application of a topic has changed. The topic
 * metadata carried by the superclass reflects the NEW owner; this event
 * additionally exposes the previous owner's application ID.
 */
@Getter
public class TopicOwnerChangeEvent extends TopicEvent {

    /** ID of the application which owned the topic before the change. */
    private final String previousOwnerApplicationId;

    public TopicOwnerChangeEvent(GalapagosEventContext context, String previousOwnerApplicationId,
            TopicMetadata metadata) {
        super(context, metadata);
        this.previousOwnerApplicationId = previousOwnerApplicationId;
    }
}
499
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicRemoveProducerEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicRemoveProducerEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.Getter;

/**
 * Event fired when a producer application has been removed from a topic.
 */
@Getter
public class TopicRemoveProducerEvent extends TopicEvent {

    /** ID of the application which was removed as a producer. */
    private final String producerApplicationId;

    public TopicRemoveProducerEvent(GalapagosEventContext context, String producerApplicationId,
            TopicMetadata metadata) {
        super(context, metadata);
        this.producerApplicationId = producerApplicationId;
    }
}
484
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
EventContextSource.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/EventContextSource.java
package com.hermesworld.ais.galapagos.events;

import java.util.Map;

/**
 * Interface for components wanting to contribute to the context of a Galapagos
 * event. Usually, this is used to store some thread-local attributes derived
 * from Security contexts or Request attributes.
 */
public interface EventContextSource {

    /**
     * Builds and returns the map of context values provided by this context source.
     *
     * @return A (possibly empty) map of context values, never <code>null</code>.
     */
    Map<String, Object> getContextValues();
}
558
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicAddProducerEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicAddProducerEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.Getter;

/**
 * Event fired when an application has been added as a producer to a topic.
 */
@Getter
public class TopicAddProducerEvent extends TopicEvent {

    /** ID of the application which was added as a producer. */
    private final String producerApplicationId;

    public TopicAddProducerEvent(GalapagosEventContext context, String producerApplicationId,
            TopicMetadata metadata) {
        super(context, metadata);
        this.producerApplicationId = producerApplicationId;
    }
}
466
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationAuthenticationChangeEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/ApplicationAuthenticationChangeEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import lombok.Getter;
import org.json.JSONObject;

/**
 * Event fired when the authentication data (e.g. credentials / certificate
 * info) of an application has changed. Carries both the previous and the new
 * authentication JSON so listeners can compare them.
 */
@Getter
public class ApplicationAuthenticationChangeEvent extends ApplicationEvent {

    /** Authentication data before the change. */
    private final JSONObject oldAuthentication;

    /** Authentication data after the change. */
    private final JSONObject newAuthentication;

    public ApplicationAuthenticationChangeEvent(GalapagosEventContext context, ApplicationMetadata metadata,
            JSONObject oldAuthentication, JSONObject newAuthentication) {
        super(context, metadata);
        this.oldAuthentication = oldAuthentication;
        this.newAuthentication = newAuthentication;
    }
}
682
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationEventsListener.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/ApplicationEventsListener.java
package com.hermesworld.ais.galapagos.events;

import java.util.concurrent.CompletableFuture;

/**
 * Listener interface for application-related Galapagos events. Handlers run
 * asynchronously; the returned {@link CompletableFuture} completes when the
 * handler has finished processing the event.
 */
public interface ApplicationEventsListener {

    CompletableFuture<Void> handleApplicationRegistered(ApplicationEvent event);

    CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationAuthenticationChangeEvent event);

    CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequestEvent event);

    CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequestEvent event);

    CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequestEvent event);
}
647
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
TopicCreatedEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicCreatedEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.Getter;

/**
 * Event fired when a topic has been created. In addition to the topic metadata
 * (carried by the superclass), it exposes the technical Kafka creation
 * parameters used. Field made final for immutability, consistent with the
 * other event classes in this package.
 */
@Getter
public class TopicCreatedEvent extends TopicEvent {

    /** Technical Kafka parameters (partitions, configs) the topic was created with. */
    private final TopicCreateParams topicCreateParams;

    public TopicCreatedEvent(GalapagosEventContext context, TopicMetadata metadata,
            TopicCreateParams topicCreateParams) {
        super(context, metadata);
        this.topicCreateParams = topicCreateParams;
    }
}
532
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GalapagosEventSink.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/GalapagosEventSink.java
package com.hermesworld.ais.galapagos.events; import com.hermesworld.ais.galapagos.applications.ApplicationMetadata; import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest; import com.hermesworld.ais.galapagos.kafka.TopicCreateParams; import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata; import com.hermesworld.ais.galapagos.topics.SchemaMetadata; import com.hermesworld.ais.galapagos.topics.TopicMetadata; import org.json.JSONObject; import java.util.concurrent.CompletableFuture; public interface GalapagosEventSink { CompletableFuture<Void> handleTopicCreated(TopicMetadata metadata, TopicCreateParams topicCreateParams); CompletableFuture<Void> handleTopicDeleted(TopicMetadata metadata); CompletableFuture<Void> handleTopicDescriptionChanged(TopicMetadata metadata); CompletableFuture<Void> handleTopicDeprecated(TopicMetadata metadata); CompletableFuture<Void> handleTopicUndeprecated(TopicMetadata metadata); CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicMetadata metadata); CompletableFuture<Void> handleTopicSchemaAdded(TopicMetadata metadata, SchemaMetadata newSchema); CompletableFuture<Void> handleTopicSchemaDeleted(TopicMetadata metadata); CompletableFuture<Void> handleSubscriptionCreated(SubscriptionMetadata subscription); CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionMetadata subscription); CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionMetadata subscription); CompletableFuture<Void> handleApplicationRegistered(ApplicationMetadata metadata); CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationMetadata metadata, JSONObject oldAuthentication, JSONObject newAuthentication); CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequest request); CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequest request); CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequest 
request); CompletableFuture<Void> handleAddTopicProducer(TopicMetadata metadata, String producerApplicationId); CompletableFuture<Void> handleRemoveTopicProducer(TopicMetadata metadata, String producerApplicationId); CompletableFuture<Void> handleTopicOwnerChanged(TopicMetadata metadata, String previousOwnerApplicationId); }
2,436
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
ApplicationOwnerRequestEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/ApplicationOwnerRequestEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import lombok.Getter;

/**
 * Event fired when an application owner request has been created, updated, or
 * canceled. Field made final for immutability, consistent with the other event
 * classes in this package.
 */
@Getter
public class ApplicationOwnerRequestEvent extends AbstractGalapagosEvent {

    /** The owner request this event refers to. */
    private final ApplicationOwnerRequest request;

    public ApplicationOwnerRequestEvent(GalapagosEventContext context, ApplicationOwnerRequest request) {
        super(context);
        this.request = request;
    }
}
446
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
AbstractGalapagosEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/AbstractGalapagosEvent.java
package com.hermesworld.ais.galapagos.events;

import lombok.Getter;

/**
 * Base class for all Galapagos events. Carries the event context, which
 * provides access to the associated Kafka cluster and arbitrary context
 * values. Field made final for immutability.
 */
@Getter
public abstract class AbstractGalapagosEvent {

    /** Context in which this event occurred (cluster, request attributes). */
    private final GalapagosEventContext context;

    protected AbstractGalapagosEvent(GalapagosEventContext context) {
        this.context = context;
    }
}
281
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GalapagosEventContext.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/GalapagosEventContext.java
package com.hermesworld.ais.galapagos.events;

import java.util.Optional;

import com.hermesworld.ais.galapagos.kafka.KafkaCluster;

/**
 * Context associated with every Galapagos event, giving listeners access to
 * the Kafka cluster the event occurred on and to arbitrary named context
 * values (see {@link EventContextSource}).
 */
public interface GalapagosEventContext {

    /** The Kafka cluster this event is associated with. */
    KafkaCluster getKafkaCluster();

    /**
     * Returns the context value stored under the given key, if any.
     *
     * @param key Key of the context value.
     * @return The value, or an empty Optional if not present.
     */
    <T> Optional<T> getContextValue(String key);
}
264
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionEventsListener.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/SubscriptionEventsListener.java
package com.hermesworld.ais.galapagos.events;

import java.util.concurrent.CompletableFuture;

/**
 * Listener interface for subscription-related Galapagos events. Handlers run
 * asynchronously; the returned {@link CompletableFuture} completes when the
 * handler has finished processing the event.
 */
public interface SubscriptionEventsListener {

    CompletableFuture<Void> handleSubscriptionCreated(SubscriptionEvent event);

    CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionEvent event);

    CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionEvent event);
}
387
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
GalapagosEventManager.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/GalapagosEventManager.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.kafka.KafkaCluster;

/**
 * Factory for event sinks. Services obtain a {@link GalapagosEventSink} bound
 * to a specific Kafka cluster and fire their events through it.
 */
public interface GalapagosEventManager {

    /**
     * Creates a new event sink bound to the given Kafka cluster.
     *
     * @param kafkaCluster Cluster to associate with all events fired via the sink.
     * @return A new event sink, never {@code null}.
     */
    GalapagosEventSink newEventSink(KafkaCluster kafkaCluster);
}
214
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z
SubscriptionEvent.java
/FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/SubscriptionEvent.java
package com.hermesworld.ais.galapagos.events;

import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import lombok.Getter;

/**
 * Event fired when a subscription has been created, updated, or deleted.
 * Field made final for immutability, consistent with the other event classes
 * in this package.
 */
@Getter
public class SubscriptionEvent extends AbstractGalapagosEvent {

    /** Metadata of the subscription this event refers to. */
    private final SubscriptionMetadata metadata;

    public SubscriptionEvent(GalapagosEventContext context, SubscriptionMetadata metadata) {
        super(context);
        this.metadata = metadata;
    }
}
420
Java
.java
HermesGermany/galapagos
81
22
15
2020-10-02T09:40:40Z
2024-05-08T13:11:33Z