file_name stringlengths 6 86 | file_path stringlengths 45 249 | content stringlengths 47 6.26M | file_size int64 47 6.26M | language stringclasses 1 value | extension stringclasses 1 value | repo_name stringclasses 767 values | repo_stars int64 8 14.4k | repo_forks int64 0 1.17k | repo_open_issues int64 0 788 | repo_created_at stringclasses 767 values | repo_pushed_at stringclasses 767 values |
|---|---|---|---|---|---|---|---|---|---|---|---|
TopicSchemaRemovedEvent.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicSchemaRemovedEvent.java | package com.hermesworld.ais.galapagos.events;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.Getter;
/**
 * Event signalling that a schema has been removed from a topic (fired by the event manager's
 * {@code handleTopicSchemaDeleted}). Carries no data beyond the topic metadata of its superclass.
 */
// @Getter generates nothing here (no own fields); presumably kept for consistency with sibling events — TODO confirm.
@Getter
public class TopicSchemaRemovedEvent extends TopicEvent {

    /**
     * Creates the event.
     *
     * @param context  Event context, e.g. giving access to the Kafka cluster the event occurred on.
     * @param metadata Metadata of the topic the schema was removed from.
     */
    public TopicSchemaRemovedEvent(GalapagosEventContext context, TopicMetadata metadata) {
        super(context, metadata);
    }
}
| 331 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicEvent.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/TopicEvent.java | package com.hermesworld.ais.galapagos.events;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import lombok.Getter;
/**
 * Base event for all topic related Galapagos events. Lombok's {@code @Getter} exposes the topic
 * metadata via a generated {@code getMetadata()}.
 */
@Getter
public class TopicEvent extends AbstractGalapagosEvent {

    // Metadata of the topic this event refers to; immutable reference, set once at construction.
    private final TopicMetadata metadata;

    /**
     * Creates the event.
     *
     * @param context  Event context, e.g. giving access to the Kafka cluster the event occurred on.
     * @param metadata Metadata of the topic this event refers to.
     */
    public TopicEvent(GalapagosEventContext context, TopicMetadata metadata) {
        super(context);
        this.metadata = metadata;
    }
}
| 384 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
GalapagosEventManagerImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/events/impl/GalapagosEventManagerImpl.java | package com.hermesworld.ais.galapagos.events.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import org.json.JSONObject;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
/**
 * Default implementation of {@link GalapagosEventManager}. Dispatches topic, subscription, and application
 * related events to all Spring beans implementing the corresponding listener interfaces, and builds a
 * per-sink event context from all registered {@link EventContextSource} beans.
 */
@Component
public class GalapagosEventManagerImpl implements GalapagosEventManager {

    private final List<TopicEventsListener> topicListeners;

    private final List<SubscriptionEventsListener> subscriptionListeners;

    private final List<ApplicationEventsListener> applicationListeners;

    private final List<EventContextSource> contextSources;

    // All collaborator lists are injected @Lazy — presumably to break circular dependencies between
    // listeners and the services that fire events. TODO confirm against the Spring context setup.
    public GalapagosEventManagerImpl(@Lazy List<TopicEventsListener> topicListeners,
            @Lazy List<SubscriptionEventsListener> subscriptionListeners,
            @Lazy List<ApplicationEventsListener> applicationListeners, @Lazy List<EventContextSource> contextSources) {
        this.topicListeners = topicListeners;
        this.subscriptionListeners = subscriptionListeners;
        this.applicationListeners = applicationListeners;
        this.contextSources = contextSources;
    }

    @Override
    public GalapagosEventSink newEventSink(KafkaCluster kafkaCluster) {
        // Each sink snapshots the event context once, at creation time (see EventSinkImpl constructor).
        return new EventSinkImpl(kafkaCluster);
    }

    /**
     * Event sink bound to one Kafka cluster. Each handleXyz method wraps its arguments into the matching
     * event object and forwards it to all registered listeners of the matching type.
     */
    private class EventSinkImpl implements GalapagosEventSink {

        private final GalapagosEventContext eventContext;

        public EventSinkImpl(KafkaCluster kafkaCluster) {
            // Context values are collected once here, not re-read per event.
            this.eventContext = buildEventContext(kafkaCluster);
        }

        @Override
        public CompletableFuture<Void> handleTopicCreated(TopicMetadata metadata, TopicCreateParams topicCreateParams) {
            TopicCreatedEvent event = new TopicCreatedEvent(eventContext, metadata, topicCreateParams);
            return handleEvent(topicListeners, l -> l.handleTopicCreated(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicDeleted(TopicMetadata metadata) {
            TopicEvent event = new TopicEvent(eventContext, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicDeleted(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicDescriptionChanged(TopicMetadata metadata) {
            TopicEvent event = new TopicEvent(eventContext, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicDescriptionChanged(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicDeprecated(TopicMetadata metadata) {
            TopicEvent event = new TopicEvent(eventContext, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicDeprecated(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicUndeprecated(TopicMetadata metadata) {
            TopicEvent event = new TopicEvent(eventContext, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicUndeprecated(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicMetadata metadata) {
            TopicEvent event = new TopicEvent(eventContext, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicSubscriptionApprovalRequiredFlagChanged(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicSchemaAdded(TopicMetadata metadata, SchemaMetadata newSchema) {
            TopicSchemaAddedEvent event = new TopicSchemaAddedEvent(eventContext, metadata, newSchema);
            return handleEvent(topicListeners, l -> l.handleTopicSchemaAdded(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicSchemaDeleted(TopicMetadata metadata) {
            TopicSchemaRemovedEvent event = new TopicSchemaRemovedEvent(eventContext, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicSchemaDeleted(event));
        }

        @Override
        public CompletableFuture<Void> handleSubscriptionCreated(SubscriptionMetadata subscription) {
            SubscriptionEvent event = new SubscriptionEvent(eventContext, subscription);
            return handleEvent(subscriptionListeners, l -> l.handleSubscriptionCreated(event));
        }

        @Override
        public CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionMetadata subscription) {
            SubscriptionEvent event = new SubscriptionEvent(eventContext, subscription);
            return handleEvent(subscriptionListeners, l -> l.handleSubscriptionDeleted(event));
        }

        @Override
        public CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionMetadata subscription) {
            SubscriptionEvent event = new SubscriptionEvent(eventContext, subscription);
            return handleEvent(subscriptionListeners, l -> l.handleSubscriptionUpdated(event));
        }

        @Override
        public CompletableFuture<Void> handleApplicationRegistered(ApplicationMetadata metadata) {
            ApplicationEvent event = new ApplicationEvent(eventContext, metadata);
            return handleEvent(applicationListeners, l -> l.handleApplicationRegistered(event));
        }

        @Override
        public CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationMetadata metadata,
                JSONObject oldAuthentication, JSONObject newAuthentication) {
            ApplicationAuthenticationChangeEvent event = new ApplicationAuthenticationChangeEvent(eventContext,
                    metadata, oldAuthentication, newAuthentication);
            return handleEvent(applicationListeners, l -> l.handleApplicationAuthenticationChanged(event));
        }

        @Override
        public CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequest request) {
            ApplicationOwnerRequestEvent event = new ApplicationOwnerRequestEvent(eventContext, request);
            return handleEvent(applicationListeners, l -> l.handleApplicationOwnerRequestCreated(event));
        }

        @Override
        public CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequest request) {
            ApplicationOwnerRequestEvent event = new ApplicationOwnerRequestEvent(eventContext, request);
            return handleEvent(applicationListeners, l -> l.handleApplicationOwnerRequestUpdated(event));
        }

        @Override
        public CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequest request) {
            ApplicationOwnerRequestEvent event = new ApplicationOwnerRequestEvent(eventContext, request);
            return handleEvent(applicationListeners, l -> l.handleApplicationOwnerRequestCanceled(event));
        }

        @Override
        public CompletableFuture<Void> handleAddTopicProducer(TopicMetadata metadata, String producerId) {
            TopicAddProducerEvent event = new TopicAddProducerEvent(eventContext, producerId, metadata);
            return handleEvent(topicListeners, l -> l.handleAddTopicProducer(event));
        }

        @Override
        public CompletableFuture<Void> handleRemoveTopicProducer(TopicMetadata metadata, String appDeleteId) {
            TopicRemoveProducerEvent event = new TopicRemoveProducerEvent(eventContext, appDeleteId, metadata);
            return handleEvent(topicListeners, l -> l.handleRemoveTopicProducer(event));
        }

        @Override
        public CompletableFuture<Void> handleTopicOwnerChanged(TopicMetadata metadata,
                String previousOwnerApplicationId) {
            TopicOwnerChangeEvent event = new TopicOwnerChangeEvent(eventContext, previousOwnerApplicationId, metadata);
            return handleEvent(topicListeners, l -> l.handleTopicOwnerChanged(event));
        }

        /**
         * Invokes the given listener method on every listener and combines the returned futures into one.
         * <p>
         * Note that each listener is invoked eagerly inside the loop (the {@code apply} call happens
         * immediately, before earlier futures complete); the composition via {@code thenCompose} only
         * determines when the <i>returned</i> future completes. The result completes once all non-null
         * listener futures have completed, and completes exceptionally if any of them fails. A listener
         * returning {@code null} is treated as "nothing to wait for".
         */
        private <L> CompletableFuture<Void> handleEvent(Collection<L> listeners,
                Function<L, CompletableFuture<Void>> listenerInvocation) {
            CompletableFuture<Void> result = CompletableFuture.completedFuture(null);
            for (L listener : listeners) {
                CompletableFuture<Void> cf = listenerInvocation.apply(listener);
                if (cf != null) {
                    result = result.thenCompose(o -> cf);
                }
            }
            return result;
        }
    }

    /**
     * Collects the context values of all registered context sources into one map and wraps it, together
     * with the given cluster, into an event context.
     */
    private GalapagosEventContext buildEventContext(KafkaCluster kafkaCluster) {
        Map<String, Object> context = new HashMap<>();
        for (EventContextSource contextSource : contextSources) {
            // On key collisions, values of later sources override earlier ones (plain putAll semantics).
            context.putAll(contextSource.getContextValues());
        }
        return new EventContextImpl(kafkaCluster, context);
    }

    /** Simple immutable holder implementation of {@link GalapagosEventContext}. */
    private static class EventContextImpl implements GalapagosEventContext {

        private final KafkaCluster kafkaCluster;

        private final Map<String, Object> context;

        public EventContextImpl(KafkaCluster kafkaCluster, Map<String, Object> context) {
            this.kafkaCluster = kafkaCluster;
            this.context = context;
        }

        // Unchecked cast: the caller is responsible for requesting the correct type for the key;
        // a wrong type parameter would only surface as a ClassCastException at the use site.
        @SuppressWarnings("unchecked")
        @Override
        public <T> Optional<T> getContextValue(String key) {
            return (Optional<T>) Optional.ofNullable(context.get(key));
        }

        @Override
        public KafkaCluster getKafkaCluster() {
            return kafkaCluster;
        }
    }
}
| 9,871 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ProducerCompatibilityErrorHandler.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/ProducerCompatibilityErrorHandler.java | package com.hermesworld.ais.galapagos.schemas;
import org.everit.json.schema.ArraySchema;
import static com.hermesworld.ais.galapagos.schemas.SchemaUtil.fullPropName;
import static com.hermesworld.ais.galapagos.schemas.SchemaUtil.propLocationName;
/**
 * As the compatibility validation is always "consumer-view", checks for producer compatibility are performed with
 * swapped schemas. This is why this error handler "swaps" the error messages again to make them more helpful (note
 * that, due to the swap, {@code getCurrentNodeInOldSchema()} holds a node of the <i>new</i> schema and vice versa).
 * <br>
 * Also, it allows for some "liberal" settings which are useful in most cases.
 */
public class ProducerCompatibilityErrorHandler extends ConsumerCompatibilityErrorHandler {

    // If true, properties newly defined in the new schema are tolerated (see
    // handleDefinedPropertyNoLongerDefined below).
    private final boolean allowNewOptionalProperties;

    /**
     * Creates the error handler.
     *
     * @param allowNewOptionalProperties If {@code true}, newly defined (optional) properties in the new schema are
     *                                   accepted instead of being reported as incompatible.
     */
    public ProducerCompatibilityErrorHandler(boolean allowNewOptionalProperties) {
        this.allowNewOptionalProperties = allowNewOptionalProperties;
    }

    @Override
    public void handleDefinedPropertyNoLongerDefined(SchemaCompatibilityValidationContext context, String property)
            throws IncompatibleSchemaException {
        // Swapped view: a property "no longer defined" in the swapped check is a NEWLY defined property
        // from the producer's perspective.
        if (!allowNewOptionalProperties) {
            throw new IncompatibleSchemaException("Property " + fullPropName(context, property)
                    + " is newly defined. Producers could already use this property (as additional properties are allowed) in a different format.");
        }
    }

    @Override
    public void handleCombinedSchemaReplacedByIncompatibleSchema(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Schemas at " + propLocationName(context) + " have changed in an incompatible way");
    }

    @Override
    public void handleSchemaTypeDiffers(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Labels are intentionally inverted: due to the schema swap, "NewSchema" node holds the OLD schema type.
        throw new IncompatibleSchemaException("Schema type differs at " + propLocationName(context) + " (old: "
                + context.getCurrentNodeInNewSchema().getClass().getSimpleName() + ", new: "
                + context.getCurrentNodeInOldSchema().getClass().getSimpleName() + ")");
    }

    @Override
    public void handleAdditionalPropertiesIntroduced(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("additionalProperties must not be removed "
                + "in a newer schema version (old producers could currently provide additional properties)");
    }

    @Override
    public void handlePropertyNoLongerRequired(SchemaCompatibilityValidationContext context, String property)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Property " + fullPropName(context, property)
                + " is now required, but old producers may not yet provide it.");
    }

    @Override
    public void handleMinPropertiesReduced(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Minimum number of properties cannot be increased at "
                + propLocationName(context) + ". Old producers could provide too few properties.");
    }

    @Override
    public void handleRegexpPatternRemoved(SchemaCompatibilityValidationContext context, String pattern)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New pattern " + pattern + " introduced in new schema at "
                + propLocationName(context) + ". Producers may not yet adhere to this pattern.");
    }

    @Override
    public void handleMultipleOfConstraintRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("multipleOf constraint added on number property "
                + propLocationName(context) + ". Current producers may provide incompatible values.");
    }

    @Override
    public void handleIntegerChangedToNumber(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Old schema allowed any numeric values at property " + propLocationName(context)
                        + " while new schema only allows integer values. Producers may provide incompatible values.");
    }

    @Override
    public void handleNumberAllowsLowerValues(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema has increased lower boundary for values for number property "
                + propLocationName(context) + " than old schema. Producers may provide incompatible values.");
    }

    @Override
    public void handleNumberAllowsGreaterValues(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Fixed message inconsistency: said "Providers may provide" while all sibling messages say "Producers".
        throw new IncompatibleSchemaException("New schema has decreased upper boundary for values for number property "
                + propLocationName(context) + " than old schema. Producers may provide incompatible values.");
    }

    @Override
    public void handleArrayUniqueItemsRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema requires unique items for array at "
                + propLocationName(context) + ". Producers may not fulfill this requirement.");
    }

    @Override
    public void handleArrayAllowsLessItems(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Due to the schema swap, the "old schema" node actually carries the NEW schema's minItems.
        throw new IncompatibleSchemaException(
                "New schema requires a minimum of " + ((ArraySchema) context.getCurrentNodeInOldSchema()).getMinItems()
                        + " items for array at " + propLocationName(context)
                        + ", while old schema did not require that many items. Producers may provide too few items.");
    }

    @Override
    public void handleArrayAllowsMoreItems(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Due to the schema swap, the "old schema" node actually carries the NEW schema's maxItems.
        throw new IncompatibleSchemaException(
                "New schema allows a maximum of " + ((ArraySchema) context.getCurrentNodeInOldSchema()).getMaxItems()
                        + " items for array at " + propLocationName(context)
                        + ", while old schema allowed more items. Producers may provide too many items.");
    }

    @Override
    public void handleArrayAllSchemaToContainsSchema(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Array at " + propLocationName(context)
                + " cannot be changed from a schema for at least ONE (contains) item to a schema for ALL items.");
    }

    @Override
    public void handleArrayItemSchemaRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Array at " + propLocationName(context) + " cannot introduce a strong typing for items.");
    }

    @Override
    public void handleArrayContainsSchemaNotFound(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Array at " + propLocationName(context)
                + " introduces a contains definition which was previously not present as any schema.");
    }

    @Override
    public void handleArrayLessItemSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Array at " + propLocationName(context) + " defines strong schemas for more items than before.");
    }

    @Override
    public void handleArrayMoreItemSchemasThanAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema allows less items in array at " + propLocationName(context)
                + " than were previously allowed here.");
    }

    @Override
    public void handleArrayLessItemsThanSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Variable is named newSchema on purpose: due to the schema swap, getCurrentNodeInOldSchema()
        // holds the node of the NEW schema here.
        ArraySchema newSchema = (ArraySchema) context.getCurrentNodeInOldSchema();
        throw new IncompatibleSchemaException(
                "New schema defines " + newSchema.getItemSchemas().size() + " items for array at "
                        + propLocationName(context) + ", but old schema did not force this number of items.");
    }

    @Override
    public void handleArrayMoreItemsThanSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema does not accept additional items for array at "
                + propLocationName(context)
                + ", but old schema had higher limit for number of items in array. Producers may provide too many items.");
    }

    @Override
    public void handleArrayItemSchemasRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "New schema makes stronger requirements for array at " + propLocationName(context)
                        + " than old schema does. Producers may not adhere to stronger definition.");
    }

    @Override
    public void handleShorterStringsAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "New schema requires longer strings than old schema for string property " + propLocationName(context));
    }

    @Override
    public void handleLongerStringsAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "New schema requires shorter strings than old schema for string property " + propLocationName(context));
    }

    @Override
    public void handleEnumValueAdded(SchemaCompatibilityValidationContext context, String value)
            throws IncompatibleSchemaException {
        // Swapped view: an "added" enum value in the swapped check is a REMOVED value from the producer's view.
        throw new IncompatibleSchemaException("Schema removes enum value for property " + propLocationName(context)
                + ": " + value + ". Current producers may still use this value.");
    }
}
| 10,641 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SchemaCompatibilityErrorHandler.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/SchemaCompatibilityErrorHandler.java | package com.hermesworld.ais.galapagos.schemas;
import static com.hermesworld.ais.galapagos.schemas.SchemaUtil.propLocationName;
/**
 * Callback interface used during schema compatibility validation. The validator invokes one of these methods for
 * each potential incompatibility it finds; implementations decide whether to signal it as a hard incompatibility
 * (by throwing {@link IncompatibleSchemaException}) or to tolerate it. The
 * {@code SchemaCompatibilityValidationContext} gives access to the location and the nodes in old and new schema.
 */
public interface SchemaCompatibilityErrorHandler {

    void handleCombinedSchemaReplacedByIncompatibleSchema(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleSchemaTypeDiffers(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleAdditionalPropertiesIntroduced(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handlePropertyNoLongerRequired(SchemaCompatibilityValidationContext context, String property)
            throws IncompatibleSchemaException;

    void handleMinPropertiesReduced(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    // Default implementation: an invalid "dependencies" configuration is always an incompatibility.
    default void handleInvalidDependenciesConfiguration(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Invalid dependencies configuration found at " + propLocationName(context));
    }

    void handleDefinedPropertyNoLongerDefined(SchemaCompatibilityValidationContext context, String property)
            throws IncompatibleSchemaException;

    void handleRegexpPatternRemoved(SchemaCompatibilityValidationContext context, String pattern)
            throws IncompatibleSchemaException;

    // --- number related checks ---

    void handleMultipleOfConstraintRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleMultipleOfConstraintChanged(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleIntegerChangedToNumber(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleNumberAllowsLowerValues(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleNumberAllowsGreaterValues(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    // --- array related checks ---

    void handleArrayUniqueItemsRemoved(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleArrayAllowsLessItems(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleArrayAllowsMoreItems(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleArrayAllSchemaToContainsSchema(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleArrayItemSchemaRemoved(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleArrayContainsSchemaNotFound(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleArrayLessItemSchemas(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleArrayMoreItemSchemasThanAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleArrayLessItemsThanSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleArrayMoreItemsThanSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleArrayItemSchemasRemoved(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    // --- string related checks ---

    void handleShorterStringsAllowed(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleLongerStringsAllowed(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleStringPatternChanged(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleStringFormatChanged(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    // --- combined / conditional schema checks ---

    void handleCombineOperatorChanged(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleCombineSubschemaNoMatch(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    // Default implementation: an unresolvable $ref is always an incompatibility.
    default void handleUnresolvedSchemaReference(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Reference at " + propLocationName(context) + " could not be resolved");
    }

    void handleConstantValueChanged(SchemaCompatibilityValidationContext context) throws IncompatibleSchemaException;

    void handleConditionalSchemaDiffers(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException;

    void handleEnumValueAdded(SchemaCompatibilityValidationContext context, String value)
            throws IncompatibleSchemaException;
}
| 4,920 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConsumerCompatibilityErrorHandler.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/ConsumerCompatibilityErrorHandler.java | package com.hermesworld.ais.galapagos.schemas;
import org.everit.json.schema.ArraySchema;
import static com.hermesworld.ais.galapagos.schemas.SchemaUtil.fullPropName;
import static com.hermesworld.ais.galapagos.schemas.SchemaUtil.propLocationName;
public class ConsumerCompatibilityErrorHandler implements SchemaCompatibilityErrorHandler {
    // If true, optional properties which are removed from the schema are tolerated
    // (see handleDefinedPropertyNoLongerDefined); otherwise every removal is an incompatibility.
    private final boolean allowRemovedOptionalProperties;

    /** Creates a strict handler which does not tolerate removed optional properties. */
    public ConsumerCompatibilityErrorHandler() {
        this(false);
    }

    /**
     * Creates the error handler.
     *
     * @param allowRemovedOptionalProperties If {@code true}, optional properties removed in the new schema are
     *                                       accepted instead of being reported as incompatible.
     */
    public ConsumerCompatibilityErrorHandler(boolean allowRemovedOptionalProperties) {
        this.allowRemovedOptionalProperties = allowRemovedOptionalProperties;
    }
    // Consumer-view compatibility: each handler below reports a change of the NEW schema which old
    // CONSUMERS (relying on guarantees of the old schema) could not cope with, by throwing an
    // IncompatibleSchemaException with a human-readable explanation.

    @Override
    public void handleCombinedSchemaReplacedByIncompatibleSchema(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Combined schema at " + propLocationName(context) + " is replaced by incompatible schema");
    }

    @Override
    public void handleSchemaTypeDiffers(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Reports the schema node types (e.g. StringSchema vs. NumberSchema) for diagnosis.
        throw new IncompatibleSchemaException("Schema type differs at " + propLocationName(context) + " (new: "
                + context.getCurrentNodeInNewSchema().getClass().getSimpleName() + ", old: "
                + context.getCurrentNodeInOldSchema().getClass().getSimpleName() + ")");
    }

    @Override
    public void handleAdditionalPropertiesIntroduced(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("additionalProperties must not be introduced "
                + "in a newer schema version (old consumers could rely on no additional properties being present)");
    }

    @Override
    public void handlePropertyNoLongerRequired(SchemaCompatibilityValidationContext context, String property)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Property " + fullPropName(context, property)
                + " is no longer required, but old consumers could rely on it.");
    }

    @Override
    public void handleMinPropertiesReduced(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Minimum number of properties cannot be reduced at "
                + propLocationName(context) + ". Old consumers could rely on having at least N properties.");
    }

    @Override
    public void handleDefinedPropertyNoLongerDefined(SchemaCompatibilityValidationContext context, String property)
            throws IncompatibleSchemaException {
        // The single configurable check of this handler: removal of a (previously defined) property
        // is tolerated when allowRemovedOptionalProperties is set.
        if (!allowRemovedOptionalProperties) {
            throw new IncompatibleSchemaException("Property " + fullPropName(context, property)
                    + " is no longer defined in schema, but had a strong definition before. "
                    + "Field format could have changed, and old consumers perhaps rely on previous format");
        }
    }

    @Override
    public void handleRegexpPatternRemoved(SchemaCompatibilityValidationContext context, String pattern)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Pattern " + pattern + " is no longer defined for new schema at " + propLocationName(context)
                        + ". Consumers could rely on the schema specification for matching properties.");
    }

    @Override
    public void handleMultipleOfConstraintRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "multipleOf constraint must not be removed on number property " + propLocationName(context));
    }

    @Override
    public void handleMultipleOfConstraintChanged(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "multipleOf constraint of property " + propLocationName(context) + " changed in an incompatible way");
    }

    @Override
    public void handleIntegerChangedToNumber(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Old schema only allowed integer values at property "
                + propLocationName(context)
                + " while new schema allows non-integer values. Consumers may not be able to handle non-integer values");
    }

    @Override
    public void handleNumberAllowsLowerValues(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema allows lower values for number property "
                + propLocationName(context) + " than old schema. Consumers may not be prepared for this.");
    }

    @Override
    public void handleNumberAllowsGreaterValues(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema allows greater values for number property "
                + propLocationName(context) + " than old schema. Consumers may not be prepared for this.");
    }

    @Override
    public void handleArrayUniqueItemsRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Old schema guaranteed unique items for array at "
                + propLocationName(context) + " while new schema removes this guarantee");
    }

    @Override
    public void handleArrayAllowsLessItems(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // minItems of the old schema is included in the message for diagnosis.
        throw new IncompatibleSchemaException("Old schema had a minimum of "
                + ((ArraySchema) context.getCurrentNodeInOldSchema()).getMinItems() + " items for array at "
                + propLocationName(context) + ", while new schema does not guarantee this number of items in array");
    }

    @Override
    public void handleArrayAllowsMoreItems(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Old schema had a maximum of " + ((ArraySchema) context.getCurrentNodeInOldSchema()).getMaxItems()
                        + " items for array at " + propLocationName(context) + ", while new schema allows more items.");
    }

    @Override
    public void handleArrayAllSchemaToContainsSchema(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Array at " + propLocationName(context)
                + " cannot be reduced from a schema for ALL items to a schema for at least ONE (contains) item");
    }

    @Override
    public void handleArrayItemSchemaRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Array at " + propLocationName(context)
                + " had a strong typing for items before which cannot be removed (old consumers could rely on that format)");
    }

    @Override
    public void handleArrayContainsSchemaNotFound(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Array at " + propLocationName(context)
                + " had a contains definition before which can not be found for any item of that array in the new schema");
    }

    @Override
    public void handleArrayLessItemSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        // Both item schema counts are reported to show the mismatch.
        ArraySchema oldSchema = (ArraySchema) context.getCurrentNodeInOldSchema();
        ArraySchema newSchema = (ArraySchema) context.getCurrentNodeInNewSchema();
        throw new IncompatibleSchemaException("Array at " + propLocationName(context) + " had a strong typing for "
                + oldSchema.getItemSchemas().size() + " elements, but new schema only defines "
                + newSchema.getItemSchemas().size() + " here");
    }

    @Override
    public void handleArrayMoreItemSchemasThanAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("New schema defines more items in array at " + propLocationName(context)
                + " than were previously allowed here.");
    }

    @Override
    public void handleArrayLessItemsThanSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        ArraySchema oldSchema = (ArraySchema) context.getCurrentNodeInOldSchema();
        throw new IncompatibleSchemaException("Old schema defined " + oldSchema.getItemSchemas().size()
                + " items for array at " + propLocationName(context)
                + ", but new schema does not guarantee this number of items in the array");
    }

    @Override
    public void handleArrayMoreItemsThanSchemas(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException("Old schema did not accept additional items for array at "
                + propLocationName(context) + ", but new schema has higher limit for number of items in array");
    }

    @Override
    public void handleArrayItemSchemasRemoved(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "Old schema made stronger guarantees for array at " + propLocationName(context)
                        + " than new schema does. Consumers could rely on previously guaranteed items");
    }

    @Override
    public void handleShorterStringsAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "New schema allows for shorter strings than old schema for string property "
                        + propLocationName(context));
    }

    @Override
    public void handleLongerStringsAllowed(SchemaCompatibilityValidationContext context)
            throws IncompatibleSchemaException {
        throw new IncompatibleSchemaException(
                "New schema allows for longer strings than old schema for string property "
                        + propLocationName(context));
    }
@Override
public void handleStringPatternChanged(SchemaCompatibilityValidationContext context)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException(
"New schema defines no or different pattern for string property " + propLocationName(context));
}
@Override
public void handleStringFormatChanged(SchemaCompatibilityValidationContext context)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException("format differs for string property " + propLocationName(context));
}
@Override
public void handleCombineOperatorChanged(SchemaCompatibilityValidationContext context)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException(
"New schema defines differed combination operator at " + propLocationName(context));
}
@Override
public void handleCombineSubschemaNoMatch(SchemaCompatibilityValidationContext context)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException(
"New schema contains at least one incompatible subschema at property " + propLocationName(context));
}
@Override
public void handleConstantValueChanged(SchemaCompatibilityValidationContext context)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException(
"Constant value for property " + propLocationName(context) + " has changed in new schema");
}
@Override
public void handleConditionalSchemaDiffers(SchemaCompatibilityValidationContext context)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException("Conditional schema differs at " + propLocationName(context));
}
@Override
public void handleEnumValueAdded(SchemaCompatibilityValidationContext context, String value)
throws IncompatibleSchemaException {
throw new IncompatibleSchemaException("Schema introduces new enum value for property "
+ propLocationName(context) + ": " + value + ". Consumers may not expect this value.");
}
}
| 12,855 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SchemaCompatibilityValidationContext.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/SchemaCompatibilityValidationContext.java | package com.hermesworld.ais.galapagos.schemas;
import org.everit.json.schema.Schema;
/**
 * Read-only view on the state of a running schema compatibility validation. Passed to
 * {@code SchemaCompatibilityErrorHandler} callbacks so they can build meaningful error messages.
 */
public interface SchemaCompatibilityValidationContext {

    /** Returns the property path prefix of the node currently being compared, or {@code null} at root level. */
    String getCurrentPrefix();

    /** Returns the complete old (existing) schema the validation was started with. */
    Schema getOldSchema();

    /** Returns the complete new (candidate) schema the validation was started with. */
    Schema getNewSchema();

    /** Returns the node of the old schema currently being compared. */
    Schema getCurrentNodeInOldSchema();

    /** Returns the node of the new schema currently being compared. */
    Schema getCurrentNodeInNewSchema();
}
| 316 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
IncompatibleSchemaException.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/IncompatibleSchemaException.java | package com.hermesworld.ais.galapagos.schemas;
/**
 * Thrown when a new JSON schema is not backwards compatible with a previously published schema, i.e. consumers
 * relying on the old schema could break when receiving data conforming to the new one.
 */
public class IncompatibleSchemaException extends Exception {

    private static final long serialVersionUID = -35786123925478756L;

    /**
     * Creates the exception with an explanatory message.
     *
     * @param message Description of the detected incompatibility.
     */
    public IncompatibleSchemaException(String message) {
        super(message);
    }

    /**
     * Creates the exception with an explanatory message and a root cause.
     *
     * @param message Description of the detected incompatibility.
     * @param cause   Underlying cause of the incompatibility.
     */
    public IncompatibleSchemaException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 383 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SchemaCompatibilityValidator.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/SchemaCompatibilityValidator.java | package com.hermesworld.ais.galapagos.schemas;
import org.everit.json.schema.*;
import org.everit.json.schema.regexp.Regexp;
import java.lang.reflect.Field;
import java.util.*;
/**
 * Validates that a new JSON schema is backwards compatible with an old schema, from a <i>consumer</i> perspective:
 * data valid against the new schema must still be processable by consumers written against the old schema. Every
 * detected incompatibility is delegated to the configured {@link SchemaCompatibilityErrorHandler}, which may throw
 * an {@link IncompatibleSchemaException} (strict mode) or tolerate the finding.
 */
public class SchemaCompatibilityValidator {

    // Known limitations:
    // - Regex Pattern compatibility is not checked - every difference is treated as incompatible.
    // Theoretically, there could be stricter patterns which include the previous patterns.

    private final ValidationContextImpl context = new ValidationContextImpl();

    private final SchemaCompatibilityErrorHandler errorHandler;

    /**
     * Creates a validator for the given schema pair.
     *
     * @param oldSchema    Previously published schema.
     * @param newSchema    Candidate schema to check for compatibility.
     * @param errorHandler Handler invoked for every detected incompatibility.
     */
    public SchemaCompatibilityValidator(Schema oldSchema, Schema newSchema,
            SchemaCompatibilityErrorHandler errorHandler) {
        context.oldSchema = oldSchema;
        context.newSchema = newSchema;
        this.errorHandler = errorHandler;
    }

    /**
     * Runs the validation. May be called multiple times; the location prefix is reset on each run.
     *
     * @throws IncompatibleSchemaException if the error handler escalates a detected incompatibility.
     */
    public void validate() throws IncompatibleSchemaException {
        context.prefixSegments.clear();
        verifySchemasCompatible(context.oldSchema, context.newSchema);
    }

    /**
     * Dispatches the comparison of two schema nodes to the type-specific verification method. The node types must
     * match, with two exceptions: an old {@link EmptySchema} accepts anything, and an old combined (anyOf/oneOf)
     * schema may be replaced by one of its compatible subschemas.
     */
    private void verifySchemasCompatible(Schema oldSchema, Schema newSchema) throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getClass() == EmptySchema.class) {
            // always compatible
            return;
        }

        // special case: "anyOf" or "oneOf" can be replaced by one of its subschemas
        if (oldSchema.getClass() == CombinedSchema.class && newSchema.getClass() != CombinedSchema.class) {
            verifyCombinedReplacedBySubschema((CombinedSchema) oldSchema, newSchema);
            return;
        }

        if (newSchema.getClass() != oldSchema.getClass()) {
            errorHandler.handleSchemaTypeDiffers(context);
            return;
        }

        if (newSchema.getClass() == ObjectSchema.class) {
            verifySchemasCompatible((ObjectSchema) oldSchema, (ObjectSchema) newSchema);
        }
        else if (newSchema.getClass() == StringSchema.class) {
            verifySchemasCompatible((StringSchema) oldSchema, (StringSchema) newSchema);
        }
        else if (newSchema.getClass() == EnumSchema.class) {
            verifySchemasCompatible((EnumSchema) oldSchema, (EnumSchema) newSchema);
        }
        else if (newSchema.getClass() == ArraySchema.class) {
            verifySchemasCompatible((ArraySchema) oldSchema, (ArraySchema) newSchema);
        }
        else if (newSchema.getClass() == NumberSchema.class) {
            verifySchemasCompatible((NumberSchema) oldSchema, (NumberSchema) newSchema);
        }
        else if (newSchema.getClass() == CombinedSchema.class) {
            verifySchemasCompatible((CombinedSchema) oldSchema, (CombinedSchema) newSchema);
        }
        else if (newSchema.getClass() == ConditionalSchema.class) {
            verifySchemasCompatible((ConditionalSchema) oldSchema, (ConditionalSchema) newSchema);
        }
        else if (newSchema.getClass() == NotSchema.class) {
            verifySchemasCompatible((NotSchema) oldSchema, (NotSchema) newSchema);
        }
        else if (newSchema.getClass() == ConstSchema.class) {
            verifySchemasCompatible((ConstSchema) oldSchema, (ConstSchema) newSchema);
        }
        else if (newSchema.getClass() == ReferenceSchema.class) {
            verifySchemasCompatible((ReferenceSchema) oldSchema, (ReferenceSchema) newSchema);
        }
        else if (newSchema.getClass() == NullSchema.class || newSchema.getClass() == BooleanSchema.class) {
            // noinspection UnnecessaryReturnStatement
            return;
        }
        else {
            // unsupported (top) schema type
            throw new IncompatibleSchemaException(
                    "Unsupported schema type used: " + oldSchema.getClass().getSimpleName());
        }
    }

    /**
     * Object compatibility: required properties must stay required, minProperties must not shrink, and every
     * previously typed property must still be defined somewhere (directly, via dependencies, or via a pattern)
     * with a compatible schema.
     */
    private void verifySchemasCompatible(ObjectSchema oldSchema, ObjectSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (!oldSchema.permitsAdditionalProperties() && newSchema.permitsAdditionalProperties()) {
            errorHandler.handleAdditionalPropertiesIntroduced(context);
            return;
        }

        // required properties must still be required
        for (String property : oldSchema.getRequiredProperties()) {
            if (!newSchema.getRequiredProperties().contains(property)) {
                errorHandler.handlePropertyNoLongerRequired(context, property);
                return;
            }
        }

        // min properties cannot be reduced (consumer could rely on having at least N
        // properties)
        if (oldSchema.getMinProperties() != null && (newSchema.getMinProperties() == null
                || newSchema.getMinProperties() < oldSchema.getMinProperties())) {
            errorHandler.handleMinPropertiesReduced(context);
            return;
        }

        // Previously strongly typed (optional) properties must still be available with
        // same format
        oldPropsLoop: for (String property : oldSchema.getPropertySchemas().keySet()) {
            Schema oldPropSchema = oldSchema.getPropertySchemas().get(property);

            // simple case: still exists directly
            Schema newPropSchema = newSchema.getPropertySchemas().get(property);
            if (newPropSchema != null) {
                pushPrefix(".", property);
                verifySchemasCompatible(oldPropSchema, newPropSchema);
                popPrefix();
                continue;
            }

            // could now be inside the dependencies (verify ALL occurrences to be compatible
            // to previous schema)
            boolean found = false;
            for (Schema newDepSchema : newSchema.getSchemaDependencies().values()) {
                if (!(newDepSchema instanceof ObjectSchema)) {
                    errorHandler.handleInvalidDependenciesConfiguration(context);
                    return;
                }
                newPropSchema = ((ObjectSchema) newDepSchema).getPropertySchemas().get(property);
                if (newPropSchema != null) {
                    pushPrefix(".", property);
                    verifySchemasCompatible(oldPropSchema, newPropSchema);
                    popPrefix();
                    found = true;
                }
            }
            if (found) {
                continue;
            }

            // could now be covered by a Pattern
            Map<Regexp, Schema> newPatternSchemas = getPatternProperties(newSchema);
            for (Map.Entry<Regexp, Schema> entry : newPatternSchemas.entrySet()) {
                if (entry.getKey().patternMatchingFailure(property).isEmpty()) {
                    pushPrefix(".", property);
                    verifySchemasCompatible(oldPropSchema, entry.getValue());
                    popPrefix();
                    // JSON Schema logic: First matching pattern wins...
                    continue oldPropsLoop;
                }
            }

            // if no additionalProperties allowed in new schema, we are fine, because will not occur in different format
            if (!newSchema.permitsAdditionalProperties() && !oldSchema.getRequiredProperties().contains(property)) {
                continue;
            }

            // OK, defined nowhere, so we can't be sure that it will still be generated in same form
            errorHandler.handleDefinedPropertyNoLongerDefined(context, property);
        }

        // strong assumption: If pattern properties were used, all patterns must still
        // exist, and still be compatible
        Map<Regexp, Schema> oldPatternSchemas = getPatternProperties(oldSchema);
        Map<Regexp, Schema> newPatternSchemas = getPatternProperties(newSchema);

        for (Map.Entry<Regexp, Schema> pattern : oldPatternSchemas.entrySet()) {
            // lib does not implement equals() for Regexp class, so...
            Optional<Regexp> newKey = newPatternSchemas.keySet().stream()
                    .filter(r -> r.toString().equals(pattern.getKey().toString())).findAny();
            if (newKey.isEmpty()) {
                errorHandler.handleRegexpPatternRemoved(context, pattern.getKey().toString());
                return;
            }

            // directly compare, while we're here
            // FIX: use pattern.getKey() - previously the whole Map.Entry (pattern=schema) was printed in the prefix
            pushPrefix("", "(" + pattern.getKey() + ")");
            verifySchemasCompatible(pattern.getValue(), newPatternSchemas.get(newKey.get()));
            popPrefix();
        }
    }

    /**
     * Array compatibility: uniqueness, min/max item counts, and the three mutually exclusive item typing styles
     * (allItemSchema, containedItemSchema, positional itemSchemas) must not be weakened.
     */
    private void verifySchemasCompatible(ArraySchema oldSchema, ArraySchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.needsUniqueItems() && !newSchema.needsUniqueItems()) {
            errorHandler.handleArrayUniqueItemsRemoved(context);
        }
        if (oldSchema.getMinItems() != null && minAllowedItems(newSchema) < oldSchema.getMinItems()) {
            errorHandler.handleArrayAllowsLessItems(context);
        }
        if (oldSchema.getMaxItems() != null && maxAllowedItems(newSchema) > oldSchema.getMaxItems()) {
            errorHandler.handleArrayAllowsMoreItems(context);
        }

        if (oldSchema.getAllItemSchema() != null) {
            if (newSchema.getAllItemSchema() == null) {
                if (newSchema.getContainedItemSchema() != null) {
                    errorHandler.handleArrayAllSchemaToContainsSchema(context);
                }
                else if (newSchema.getItemSchemas() == null) {
                    errorHandler.handleArrayItemSchemaRemoved(context);
                }
                else {
                    // every positional schema must be compatible to the previous "all items" schema
                    for (int i = 0; i < newSchema.getItemSchemas().size(); i++) {
                        pushPrefix("[" + i + "]");
                        verifySchemasCompatible(oldSchema.getAllItemSchema(), newSchema.getItemSchemas().get(i));
                        popPrefix();
                    }
                }
            }
            else {
                pushPrefix("[all]");
                verifySchemasCompatible(oldSchema.getAllItemSchema(), newSchema.getAllItemSchema());
                popPrefix();
            }
        }
        else if (oldSchema.getContainedItemSchema() != null) {
            if (newSchema.getContainedItemSchema() != null) {
                pushPrefix("[contains]");
                verifySchemasCompatible(oldSchema.getContainedItemSchema(), newSchema.getContainedItemSchema());
                popPrefix();
            }
            else if (newSchema.getAllItemSchema() != null) {
                pushPrefix("[contains/all]");
                verifySchemasCompatible(oldSchema.getContainedItemSchema(), newSchema.getAllItemSchema());
                popPrefix();
            }
            else if (newSchema.getItemSchemas() != null) {
                // fine if at least ONE item matches!
                boolean match = false;
                for (int i = 0; i < newSchema.getItemSchemas().size(); i++) {
                    pushPrefix("[" + i + "]");
                    try {
                        verifySchemasCompatible(oldSchema.getContainedItemSchema(), newSchema.getItemSchemas().get(i));
                        match = true;
                        break;
                    }
                    catch (IncompatibleSchemaException ex) {
                        // ignore here - no match
                    }
                    finally {
                        popPrefix();
                    }
                }
                if (!match) {
                    errorHandler.handleArrayContainsSchemaNotFound(context);
                }
            }
        }
        else if (oldSchema.getItemSchemas() != null) {
            if (newSchema.getItemSchemas() != null) {
                // must either contain more elements (if oldSchema allows this), or exactly the same
                if (newSchema.getItemSchemas().size() < oldSchema.getItemSchemas().size()) {
                    errorHandler.handleArrayLessItemSchemas(context);
                }
                else if (newSchema.getItemSchemas().size() > oldSchema.getItemSchemas().size()
                        && (!oldSchema.permitsAdditionalItems() || (oldSchema.getMaxItems() != null
                                && oldSchema.getMaxItems() < newSchema.getItemSchemas().size()))) {
                    errorHandler.handleArrayMoreItemSchemasThanAllowed(context);
                }
                else {
                    for (int i = 0; i < oldSchema.getItemSchemas().size(); i++) {
                        pushPrefix("[" + i + "]");
                        verifySchemasCompatible(oldSchema.getItemSchemas().get(i), newSchema.getItemSchemas().get(i));
                        popPrefix();
                    }
                }
            }
            else if (newSchema.getAllItemSchema() != null) {
                // first of all, ensure array size is compatible
                int oldSize = oldSchema.getItemSchemas().size();
                if (newSchema.getMinItems() == null || newSchema.getMinItems() < oldSize) {
                    errorHandler.handleArrayLessItemsThanSchemas(context);
                }
                else if (!oldSchema.permitsAdditionalItems()
                        && (newSchema.getMaxItems() == null || newSchema.getMaxItems() > oldSize)) {
                    errorHandler.handleArrayMoreItemsThanSchemas(context);
                }
                else {
                    // must match ALL previous items
                    for (int i = 0; i < oldSchema.getItemSchemas().size(); i++) {
                        pushPrefix("[" + i + "]");
                        verifySchemasCompatible(oldSchema.getItemSchemas().get(i), newSchema.getAllItemSchema());
                        popPrefix();
                    }
                }
            }
            else {
                errorHandler.handleArrayItemSchemasRemoved(context);
            }
        }
    }

    /**
     * Combined schema compatibility: operator must stay the same, and every new subschema must be compatible to
     * at least one old subschema.
     */
    private void verifySchemasCompatible(CombinedSchema oldSchema, CombinedSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getCriterion() != newSchema.getCriterion()) {
            errorHandler.handleCombineOperatorChanged(context);
            return;
        }

        // every new subschema must be compatible to a previous old schema
        int i = 0;
        for (Schema schema : newSchema.getSubschemas()) {
            boolean match = false;
            for (Schema os : oldSchema.getSubschemas()) {
                pushPrefix("(" + newSchema.getCriterion() + ")[" + i + "]");
                try {
                    verifySchemasCompatible(os, schema);
                    match = true;
                    break;
                }
                catch (IncompatibleSchemaException ex) {
                    // ignore; try next
                }
                finally {
                    popPrefix();
                }
            }
            if (!match) {
                errorHandler.handleCombineSubschemaNoMatch(context);
            }
            i++;
        }
    }

    /**
     * String compatibility: length bounds must not widen, and pattern / format must stay identical (see class
     * comment on the pattern limitation).
     */
    private void verifySchemasCompatible(StringSchema oldSchema, StringSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getMinLength() != null
                && (newSchema.getMinLength() == null || newSchema.getMinLength() < oldSchema.getMinLength())) {
            errorHandler.handleShorterStringsAllowed(context);
        }
        if (oldSchema.getMaxLength() != null
                && (newSchema.getMaxLength() == null || newSchema.getMaxLength() > oldSchema.getMaxLength())) {
            errorHandler.handleLongerStringsAllowed(context);
        }
        if (oldSchema.getPattern() != null && (newSchema.getPattern() == null
                || !newSchema.getPattern().toString().equals(oldSchema.getPattern().toString()))) {
            errorHandler.handleStringPatternChanged(context);
        }
        if (oldSchema.getFormatValidator() != null && (newSchema.getFormatValidator() == null
                || !oldSchema.getFormatValidator().formatName().equals(newSchema.getFormatValidator().formatName()))) {
            errorHandler.handleStringFormatChanged(context);
        }
    }

    /** Tolerance for floating point comparisons of numeric schema limits. */
    private static final double DELTA = 0.0000001;

    /**
     * Number compatibility: multipleOf must stay an (integer) multiple of the old value, integer must not widen to
     * number, and the allowed value range must not grow in either direction.
     */
    private void verifySchemasCompatible(NumberSchema oldSchema, NumberSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getMultipleOf() != null) {
            if (newSchema.getMultipleOf() == null) {
                errorHandler.handleMultipleOfConstraintRemoved(context);
            }
            else {
                Number m1 = oldSchema.getMultipleOf();
                Number m2 = newSchema.getMultipleOf();
                // avoid dumb DIV/0
                if (Math.abs(m1.doubleValue()) > DELTA) {
                    double testVal = m2.doubleValue() / m1.doubleValue();
                    testVal -= Math.round(testVal);
                    // FIX: compare the absolute deviation - testVal can be negative when the ratio is just below
                    // the next integer (e.g. ratio 2.5 rounds to 3, giving -0.5), which previously went undetected
                    if (Math.abs(testVal) > DELTA) {
                        errorHandler.handleMultipleOfConstraintChanged(context);
                    }
                }
            }
        }

        if (oldSchema.requiresInteger() && !newSchema.requiresInteger()) {
            errorHandler.handleIntegerChangedToNumber(context);
        }

        if (allowsForLowerThan(oldSchema, newSchema)) {
            errorHandler.handleNumberAllowsLowerValues(context);
        }
        if (allowsForGreaterThan(oldSchema, newSchema)) {
            errorHandler.handleNumberAllowsGreaterValues(context);
        }
    }

    private void verifySchemasCompatible(NotSchema oldSchema, NotSchema newSchema) throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        // intentionally swapped parameters, because negated schema must get more liberal
        // to have the effect of "stricter" for the not-schema.
        pushPrefix("(not)");
        verifySchemasCompatible(newSchema.getMustNotMatch(), oldSchema.getMustNotMatch());
        popPrefix();
    }

    private void verifySchemasCompatible(ReferenceSchema oldSchema, ReferenceSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getReferredSchema() != null) {
            if (newSchema.getReferredSchema() == null) {
                errorHandler.handleUnresolvedSchemaReference(context);
            }
            else {
                pushPrefix(oldSchema.getReferenceValue());
                verifySchemasCompatible(oldSchema.getReferredSchema(), newSchema.getReferredSchema());
                popPrefix();
            }
        }
    }

    private void verifySchemasCompatible(ConstSchema oldSchema, ConstSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getPermittedValue() == null ? newSchema.getPermittedValue() != null
                : !Objects.deepEquals(oldSchema.getPermittedValue(), newSchema.getPermittedValue())) {
            errorHandler.handleConstantValueChanged(context);
        }
    }

    private void verifySchemasCompatible(ConditionalSchema oldSchema, ConditionalSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        if (oldSchema.getIfSchema().isPresent() != newSchema.getIfSchema().isPresent()
                || oldSchema.getThenSchema().isPresent() != newSchema.getThenSchema().isPresent()
                || oldSchema.getElseSchema().isPresent() != newSchema.getElseSchema().isPresent()) {
            errorHandler.handleConditionalSchemaDiffers(context);
            // FIX: must not fall through here - if a lenient error handler does not throw, the orElseThrow() calls
            // below would fail with NoSuchElementException for sub-schemas present only in the old schema
            return;
        }
        if (oldSchema.getIfSchema().isPresent()) {
            verifySchemasCompatible(oldSchema.getIfSchema().get(), newSchema.getIfSchema().orElseThrow());
        }
        if (oldSchema.getThenSchema().isPresent()) {
            verifySchemasCompatible(oldSchema.getThenSchema().get(), newSchema.getThenSchema().orElseThrow());
        }
        if (oldSchema.getElseSchema().isPresent()) {
            verifySchemasCompatible(oldSchema.getElseSchema().get(), newSchema.getElseSchema().orElseThrow());
        }
    }

    private void verifySchemasCompatible(EnumSchema oldSchema, EnumSchema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        // no new values must have been added
        for (Object o : newSchema.getPossibleValues()) {
            if (!oldSchema.getPossibleValues().contains(o)) {
                errorHandler.handleEnumValueAdded(context, o.toString());
                return;
            }
        }
    }

    /**
     * Checks the special case that an old anyOf/oneOf schema was replaced by a plain schema, which is compatible if
     * the plain schema is compatible to at least one of the old subschemas.
     */
    private void verifyCombinedReplacedBySubschema(CombinedSchema oldSchema, Schema newSchema)
            throws IncompatibleSchemaException {
        setCurrentNodes(oldSchema, newSchema);
        String crit = oldSchema.getCriterion().toString();
        if (!"oneOf".equals(crit) && !"anyOf".equals(crit)) {
            errorHandler.handleCombinedSchemaReplacedByIncompatibleSchema(context);
            // if that is fine for error handler, no need to check further
            return;
        }

        for (Schema os : oldSchema.getSubschemas()) {
            try {
                verifySchemasCompatible(os, newSchema);
                return;
            }
            catch (IncompatibleSchemaException ex) {
                // next
            }
        }

        errorHandler.handleCombinedSchemaReplacedByIncompatibleSchema(context);
    }

    /** Returns true if the new schema accepts numbers below the old schema's lower bound. */
    private static boolean allowsForLowerThan(NumberSchema oldSchema, NumberSchema newSchema) {
        Number minOld = oldSchema.getMinimum();
        Number minOldExcl = oldSchema.isExclusiveMinimum() ? minOld : oldSchema.getExclusiveMinimumLimit();
        Number minNew = newSchema.getMinimum();
        Number minNewExcl = newSchema.isExclusiveMinimum() ? minNew : newSchema.getExclusiveMinimumLimit();

        if (minOld != null) {
            if (minNewExcl != null) {
                if (minNewExcl.doubleValue() >= minOld.doubleValue()) {
                    return false;
                }
                return !newSchema.requiresInteger() || minNewExcl.intValue() < minOld.intValue() - 1;
            }
            else {
                return minNew == null || minNew.doubleValue() < minOld.doubleValue() - DELTA;
            }
        }
        if (minOldExcl != null) {
            if (minNew != null) {
                if (minNew.doubleValue() < minOldExcl.doubleValue()) {
                    return true;
                }
                if (minNew.doubleValue() > minOldExcl.doubleValue() + 1) {
                    return false;
                }
                if (!newSchema.requiresInteger()) {
                    return true;
                }
                return minNew.intValue() != minOldExcl.intValue() + 1;
            }
            else {
                return minNewExcl == null || minNewExcl.doubleValue() < minOldExcl.doubleValue() - DELTA;
            }
        }

        // old schema had no min limit
        return false;
    }

    /** Returns true if the new schema accepts numbers above the old schema's upper bound. */
    private static boolean allowsForGreaterThan(NumberSchema oldSchema, NumberSchema newSchema) {
        Number maxOld = oldSchema.getMaximum();
        Number maxOldExcl = oldSchema.isExclusiveMaximum() ? maxOld : oldSchema.getExclusiveMaximumLimit();
        Number maxNew = newSchema.getMaximum();
        Number maxNewExcl = newSchema.isExclusiveMaximum() ? maxNew : newSchema.getExclusiveMaximumLimit();

        if (maxOld != null) {
            if (maxNewExcl != null) {
                if (maxNewExcl.doubleValue() <= maxOld.doubleValue()) {
                    return false;
                }
                return !newSchema.requiresInteger() || maxNewExcl.intValue() > maxOld.intValue() + 1;
            }
            else {
                return maxNew == null || maxNew.doubleValue() > maxOld.doubleValue() + DELTA;
            }
        }
        if (maxOldExcl != null) {
            if (maxNew != null) {
                if (maxNew.doubleValue() > maxOldExcl.doubleValue()) {
                    return true;
                }
                if (maxNew.doubleValue() < maxOldExcl.doubleValue() - 1) {
                    return false;
                }
                if (!newSchema.requiresInteger()) {
                    return true;
                }
                return maxNew.intValue() != maxOldExcl.intValue() - 1;
            }
            else {
                return maxNewExcl == null || maxNewExcl.doubleValue() > maxOldExcl.doubleValue() + DELTA;
            }
        }

        // old schema had no max limit
        return false;
    }

    /** Returns the minimum number of items the schema guarantees for an array. */
    private static int minAllowedItems(ArraySchema schema) {
        if (schema.getMinItems() != null) {
            return schema.getMinItems();
        }
        if (schema.getItemSchemas() != null) {
            return schema.getItemSchemas().size();
        }
        return 0;
    }

    /** Returns the maximum number of items the schema permits for an array. */
    private static int maxAllowedItems(ArraySchema schema) {
        if (schema.getMaxItems() != null) {
            return schema.getMaxItems();
        }
        if (schema.getItemSchemas() != null && !schema.permitsAdditionalItems()) {
            return schema.getItemSchemas().size();
        }
        return Integer.MAX_VALUE;
    }

    /**
     * Reads the private patternProperties map of an ObjectSchema via reflection, as the library offers no public
     * accessor keyed by Regexp.
     */
    @SuppressWarnings("unchecked")
    private static Map<Regexp, Schema> getPatternProperties(ObjectSchema schema) {
        try {
            Field field = ObjectSchema.class.getDeclaredField("patternProperties");
            field.setAccessible(true);
            Map<Regexp, Schema> result = (Map<Regexp, Schema>) field.get(schema);
            return result == null ? Collections.emptyMap() : result;
        }
        catch (SecurityException | NoSuchFieldException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }

    private void pushPrefix(String segment) {
        pushPrefix("", segment);
    }

    // the separator (e.g. ".") is omitted for the very first segment
    private void pushPrefix(String separator, String segment) {
        context.prefixSegments.push(context.prefixSegments.isEmpty() ? segment : separator + segment);
    }

    private void popPrefix() {
        context.prefixSegments.pop();
    }

    // keeps the context in sync with the node pair currently being compared, for error handler callbacks
    private void setCurrentNodes(Schema currentOldNode, Schema currentNewNode) {
        context.currentOldNode = currentOldNode;
        context.currentNewNode = currentNewNode;
    }

    /** Mutable implementation of the validation context handed to the error handler. */
    private static class ValidationContextImpl implements SchemaCompatibilityValidationContext {

        private Schema oldSchema;

        private Schema newSchema;

        private final Stack<String> prefixSegments = new Stack<>();

        private Schema currentOldNode;

        private Schema currentNewNode;

        @Override
        public String getCurrentPrefix() {
            return prefixSegments.isEmpty() ? null : String.join("", prefixSegments);
        }

        @Override
        public Schema getOldSchema() {
            return oldSchema;
        }

        @Override
        public Schema getNewSchema() {
            return newSchema;
        }

        @Override
        public Schema getCurrentNodeInOldSchema() {
            return currentOldNode;
        }

        @Override
        public Schema getCurrentNodeInNewSchema() {
            return currentNewNode;
        }
    }
}
| 27,497 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SchemaUtil.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/schemas/SchemaUtil.java | package com.hermesworld.ais.galapagos.schemas;
import org.everit.json.schema.Schema;
import java.util.Optional;
/**
* Utility class to compare two schemas for Consumer compatibility. Guideline is that a potential consumer which
* currently consumes data in <code>oldSchema</code> format should be able to also handle data in <code>newSchema</code>
* format without any adjustments. <br>
*/
/**
 * Static helpers for comparing JSON schemas and for rendering property locations in compatibility error messages.
 * Guideline: a consumer which currently consumes data in <code>oldSchema</code> format should be able to also
 * handle data in <code>newSchema</code> format without any adjustments.
 */
public final class SchemaUtil {

    private SchemaUtil() {
    }

    /**
     * Checks if two JSON schemas are equal. Equality is defined by the org.everit JSON schema library.
     *
     * @param schema1 Schema to compare.
     * @param schema2 Schema to compare.
     * @return <code>true</code> if both schemas are of the same type and equal according to the library,
     *         <code>false</code> otherwise.
     */
    public static boolean areEqual(Schema schema1, Schema schema2) {
        if (schema1.getClass() != schema2.getClass()) {
            return false;
        }
        return schema1.equals(schema2);
    }

    /** Returns the current property location of the context, or "root" when at top level. */
    static String propLocationName(SchemaCompatibilityValidationContext context) {
        String prefix = context.getCurrentPrefix();
        return prefix == null ? "root" : prefix;
    }

    /** Returns the given property name prefixed with the context's current location, if any. */
    static String fullPropName(SchemaCompatibilityValidationContext context, String property) {
        String prefix = context.getCurrentPrefix();
        return prefix == null ? property : prefix + "." + property;
    }
}
| 1,365 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationMetadata.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/ApplicationMetadata.java | package com.hermesworld.ais.galapagos.applications;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
@Getter
@Setter
@JsonSerialize
@JsonIgnoreProperties(ignoreUnknown = true)
public class ApplicationMetadata implements HasKey, ApplicationPrefixes {

    private String applicationId;

    private List<String> consumerGroupPrefixes = new ArrayList<>();

    private List<String> internalTopicPrefixes = new ArrayList<>();

    private List<String> transactionIdPrefixes = new ArrayList<>();

    private String authenticationJson;

    public ApplicationMetadata() {
    }

    /**
     * Copy constructor. Prefix lists are copied into immutable snapshots.
     *
     * @param original Metadata to copy all values from.
     */
    public ApplicationMetadata(ApplicationMetadata original) {
        this.applicationId = original.applicationId;
        this.consumerGroupPrefixes = List.copyOf(original.consumerGroupPrefixes);
        // FIX: previously copied consumerGroupPrefixes here (copy-paste bug), losing the topic prefixes
        this.internalTopicPrefixes = List.copyOf(original.internalTopicPrefixes);
        this.transactionIdPrefixes = List.copyOf(original.transactionIdPrefixes);
        this.authenticationJson = original.authenticationJson;
    }

    @Override
    public String key() {
        return applicationId;
    }
}
| 1,376 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
BusinessCapability.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/BusinessCapability.java | package com.hermesworld.ais.galapagos.applications;
/**
 * A business capability an application can be associated with.
 */
public interface BusinessCapability {

    /** Returns the unique ID of this business capability. */
    String getId();

    /** Returns the display name of this business capability. */
    String getName();
}
| 138 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsService.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/ApplicationsService.java | package com.hermesworld.ais.galapagos.applications;
import org.json.JSONObject;
import java.io.OutputStream;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
/**
 * Service dealing with applications known to Galapagos, their registration on Kafka environments, and the
 * application owner request workflow.
 */
public interface ApplicationsService {

    List<? extends KnownApplication> getKnownApplications(boolean excludeUserApps);

    Optional<KnownApplication> getKnownApplication(String applicationId);

    Optional<ApplicationMetadata> getApplicationMetadata(String environmentId, String applicationId);

    CompletableFuture<ApplicationOwnerRequest> submitApplicationOwnerRequest(String applicationId, String comments);

    List<ApplicationOwnerRequest> getUserApplicationOwnerRequests();

    List<ApplicationOwnerRequest> getAllApplicationOwnerRequests();

    CompletableFuture<ApplicationOwnerRequest> updateApplicationOwnerRequest(String requestId, RequestState newState);

    CompletableFuture<Boolean> cancelUserApplicationOwnerRequest(String requestId);

    List<? extends KnownApplication> getUserApplications();

    /**
     * Returns the metadata of every application which occurs in at least one application owner request and is
     * registered on the given environment. Applications without metadata on the environment are skipped.
     *
     * @param environmentId ID of the Kafka environment to collect metadata for.
     * @return List of found metadata, possibly empty, never <code>null</code>.
     */
    default List<ApplicationMetadata> getAllApplicationMetadata(String environmentId) {
        // method reference instead of lambda for readability
        return getAllApplicationOwnerRequests().stream().map(ApplicationOwnerRequest::getApplicationId).distinct()
                .map(id -> getApplicationMetadata(environmentId, id).orElse(null)).filter(Objects::nonNull)
                .collect(Collectors.toList());
    }

    boolean isUserAuthorizedFor(String applicationId);

    CompletableFuture<ApplicationMetadata> registerApplicationOnEnvironment(String environmentId, String applicationId,
            JSONObject registerParams, OutputStream outputStreamForSecret);

    /**
     * Resets the allowed prefixes for the given application on the given environment to their defaults, as resulting
     * from current Galapagos configuration for naming rules. Kafka ACLs are <b>not</b> updated by this method, and
     * <b>no</b> events are fired via the <code>EventManager</code>. <br>
     * This removes prefixes which have previously been assigned to the given application (e.g. due to different naming
     * rules or changed application aliases), so this could <b>break</b> running applications when using previously
     * assigned, but no longer available prefixes - once the resulting ACL changes are applied (not done by this
     * method). <br>
     * Currently, this functionality is only available using the admin job "reset-application-prefixes", which indeed
     * <b>does</b> also refresh the associated Kafka ACLs.
     *
     * @param environmentId ID of Kafka cluster to operate on.
     * @param applicationId ID of application to reset prefixes of.
     *
     * @return A CompletableFuture completing once Application Metadata has been updated, or failing when any ID is
     *         invalid, or any other error occurs.
     */
    CompletableFuture<ApplicationMetadata> resetApplicationPrefixes(String environmentId, String applicationId);
}
| 3,004 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationOwnerRequest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/ApplicationOwnerRequest.java | package com.hermesworld.ais.galapagos.applications;
import java.time.ZonedDateTime;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.Getter;
import lombok.Setter;
/**
 * A request by a user to become owner of an application. Stored in a topic-based repository; the request ID serves as
 * the repository key (see {@link #key()}).
 */
@Getter
@Setter
@JsonSerialize
public class ApplicationOwnerRequest implements HasKey {

    // Unique ID of this request; also used as repository key.
    private String id;

    // ID of the application the user wants to own.
    private String applicationId;

    // Login name of the requesting user.
    private String userName;

    // Free-text comments entered by the user when submitting the request.
    private String comments;

    @JsonFormat(shape = JsonFormat.Shape.STRING)
    private ZonedDateTime createdAt;

    // E-Mail address to notify about status changes (assumption based on field name - confirm with usage).
    private String notificationEmailAddress;

    // Timestamp of the most recent state transition (serialized as ISO string, like createdAt).
    @JsonFormat(shape = JsonFormat.Shape.STRING)
    private ZonedDateTime lastStatusChangeAt;

    // User who performed the most recent state transition.
    private String lastStatusChangeBy;

    // Current state of the request (see RequestState).
    private RequestState state;

    @Override
    public String key() {
        return id;
    }
}
| 875 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
RequestState.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/RequestState.java | package com.hermesworld.ais.galapagos.applications;
/**
 * Lifecycle states of an {@link ApplicationOwnerRequest}. SUBMITTED is the initial state; APPROVED and REJECTED are
 * admin decisions. REVOKED and RESIGNED presumably mean a withdrawn approval and a voluntary give-up by the owner,
 * respectively (semantics not visible here - confirm with service implementation).
 */
public enum RequestState {
    SUBMITTED, APPROVED, REJECTED, REVOKED, RESIGNED
}
| 137 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KnownApplication.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/KnownApplication.java | package com.hermesworld.ais.galapagos.applications;
import java.net.URL;
import java.util.List;
import java.util.Set;
/**
 * An application "known" to Galapagos. The list of all known applications forms the basis from which the users can
 * select "their" applications and create Application Owner Requests for. Known applications must be registered on a
 * Kafka Cluster using
 * {@link ApplicationsService#createApplicationCertificateAndPrivateKey(String, String, String, java.io.OutputStream)}
 * or
 * {@link ApplicationsService#createApplicationCertificateFromCsr(String, String, String, String, java.io.OutputStream)}
 * to retrieve a Client Certificate for this application.
 *
 * @author AlbrechtFlo
 *
 */
public interface KnownApplication {

    /**
     * The ID of this application. It can be any non-empty string, but must be constant over time and unique throughout
     * all known applications.
     *
     * @return The ID of this application, never <code>null</code> nor an empty string.
     */
    String getId();

    /**
     * The human-readable name of this application. While the ID is constant for the "same" application, this name may
     * change over time.
     *
     * @return The human-readable name of this application, never <code>null</code> nor an empty string.
     */
    String getName();

    /**
     * The (potentially empty) set of aliases for this application. An application may have some different names or
     * commonly used abbreviations. These aliases may also be used for some Kafka objects where usually the name of the
     * application has to be used (e.g. in names of internal topics).
     *
     * @return The (potentially empty) set of aliases for this application, never <code>null</code>.
     */
    Set<String> getAliases();

    /**
     * A URL providing more information about this application, usually in an Enterprise Architecture Tool, e.g. LeanIX.
     *
     * @return A URL providing more information about this application, or <code>null</code>.
     */
    URL getInfoUrl();

    /**
     * The (potentially empty) list of business capabilities which are supported by this application. For the naming
     * schema of "API topics", the business capabilities are a central part. Applications which do not support any
     * business capability cannot provide (create) API topics (but internal topics).
     *
     * @return The (potentially empty) list of business capabilities supported by this application, never
     *         <code>null</code>.
     */
    List<BusinessCapability> getBusinessCapabilities();

}
| 2,592 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KnownApplicationDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/KnownApplicationDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import java.util.List;
import lombok.Getter;
/**
 * REST DTO describing a known application, including its business capabilities and aliases.
 * Immutable: all fields are final and only set via the constructor.
 */
@Getter
public class KnownApplicationDto {

    // ID of the application, unique among all known applications.
    private final String id;

    // Human-readable name of the application.
    private final String name;

    // Info URL (e.g. Enterprise Architecture tool), as string, or null.
    private final String infoUrl;

    // Business capabilities supported by the application.
    private final List<BusinessCapabilityDto> businessCapabilities;

    // Alternative names / abbreviations of the application.
    private final List<String> aliases;

    public KnownApplicationDto(String id, String name, String infoUrl, List<BusinessCapabilityDto> businessCapabilities,
            List<String> aliases) {
        this.id = id;
        this.name = name;
        this.infoUrl = infoUrl;
        this.businessCapabilities = businessCapabilities;
        this.aliases = aliases;
    }
}
| 668 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
UpdateApplicationDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/UpdateApplicationDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO for updating application settings. Currently only carries the Kafka consumer group prefix
 * (assumption based on field name - confirm with controller usage).
 */
@Getter
@Setter
public class UpdateApplicationDto {

    private String kafkaGroupPrefix;

}
| 202 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationPrefixesDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/ApplicationPrefixesDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import org.springframework.lang.NonNull;
import java.util.List;
/**
 * REST DTO carrying the naming prefixes an application may use on a given Kafka environment. Immutable; all lists are
 * defensive immutable copies.
 */
@JsonSerialize
@Getter
public class ApplicationPrefixesDto {

    // Prefixes allowed for the application's internal topics.
    private final List<String> internalTopicPrefixes;

    // Prefixes allowed for the application's consumer group IDs.
    private final List<String> consumerGroupPrefixes;

    // Prefixes allowed for the application's transactional IDs.
    private final List<String> transactionIdPrefixes;

    public ApplicationPrefixesDto(@NonNull List<String> internalTopicPrefixes,
            @NonNull List<String> consumerGroupPrefixes, @NonNull List<String> transactionIdPrefixes) {
        // List.copyOf creates immutable snapshots and rejects null lists as well as null elements.
        this.internalTopicPrefixes = List.copyOf(internalTopicPrefixes);
        this.consumerGroupPrefixes = List.copyOf(consumerGroupPrefixes);
        this.transactionIdPrefixes = List.copyOf(transactionIdPrefixes);
    }
}
| 854 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationCertificateDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/ApplicationCertificateDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO describing a client certificate of an application on a specific Kafka environment.
 */
@Getter
@Setter
public class ApplicationCertificateDto {

    // ID of the Kafka environment the certificate is valid for.
    private String environmentId;

    // Distinguished Name (DN) of the certificate.
    private String dn;

    // Expiry timestamp of the certificate, as string (format not visible here - confirm with producer).
    private String expiresAt;

    public ApplicationCertificateDto(String environmentId, String dn, String expiresAt) {
        this.environmentId = environmentId;
        this.dn = dn;
        this.expiresAt = expiresAt;
    }
}
| 458 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CertificateRequestDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/CertificateRequestDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO for requesting a client certificate for an application. Either carries CSR data, or asks the server to
 * generate a private key (see ApplicationsController#updateApplicationCertificate for validation rules).
 */
@JsonDeserialize
@Getter
@Setter
public class CertificateRequestDto {

    // Certificate Signing Request data (presumably PEM encoded - confirm with consumer).
    private String csrData;

    // If true, the server generates the private key (not allowed on production environments).
    private boolean generateKey;

    // If true, an existing certificate should be extended (semantics not visible here - confirm with service).
    private boolean extendCertificate;

}
| 351 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
UpdateApplicationOwnerRequestDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/UpdateApplicationOwnerRequestDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.hermesworld.ais.galapagos.applications.RequestState;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO for updating the state of an application owner request (admin operation).
 */
@Getter
@Setter
public class UpdateApplicationOwnerRequestDto {

    // Target state for the request, e.g. APPROVED or REJECTED.
    private RequestState newState;

}
| 277 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CreatedApiKeyDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/CreatedApiKeyDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO returned after creating a Confluent Cloud API key for an application. The secret is only available at
 * creation time.
 */
@JsonSerialize
@Getter
@Setter
public class CreatedApiKeyDto {

    // The created API key (public part).
    private String apiKey;

    // The associated API secret.
    private String apiSecret;

}
| 298 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationOwnerRequestSubmissionDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/ApplicationOwnerRequestSubmissionDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO for submitting a new application owner request.
 */
@Getter
@Setter
public class ApplicationOwnerRequestSubmissionDto {

    // ID of the application the user wants to become owner of (required).
    private String applicationId;

    // Optional free-text comments for the reviewing admin.
    private String comments;

}
| 245 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationAuthenticationsDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/ApplicationAuthenticationsDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import lombok.Setter;
import java.util.Map;
/**
 * REST DTO mapping Kafka environment IDs to the authentication information of an application on that environment.
 */
@JsonSerialize
@Getter
@Setter
public class ApplicationAuthenticationsDto {

    // Key: environment ID; value: authentication details for the application on that environment.
    private Map<String, AuthenticationDto> authentications;

}
| 336 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
BusinessCapabilityDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/BusinessCapabilityDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO describing a business capability supported by an application.
 */
@Getter
@Setter
public class BusinessCapabilityDto {

    // Unique ID of the business capability.
    private String id;

    // Human-readable name of the business capability.
    private String name;

    public BusinessCapabilityDto(String id, String name) {
        this.id = id;
        this.name = name;
    }
}
| 329 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
StagingFilterDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/StagingFilterDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import java.util.List;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.changes.Change;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO carrying a filter for staging operations: only the listed changes should be staged.
 */
@Getter
@Setter
@JsonSerialize
public class StagingFilterDto {

    // Subset of changes to apply during staging.
    private List<Change> changes;

}
| 352 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
AuthenticationDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/AuthenticationDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import lombok.Setter;
import java.util.Map;
/**
 * REST DTO describing the authentication of an application on a single Kafka environment.
 */
@JsonSerialize
@Getter
@Setter
public class AuthenticationDto {

    // Authentication mode of the environment (e.g. certificates or API keys; taken from environment config).
    private String authenticationType;

    // Mode-specific authentication details, as generic key/value structure.
    private Map<String, Object> authentication;

}
| 352 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CertificateResponseDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/CertificateResponseDto.java | package com.hermesworld.ais.galapagos.applications.controller;
import lombok.Getter;
import lombok.Setter;
/**
 * REST DTO returned after creating an application certificate: a suggested file name plus the file contents, Base64
 * encoded.
 */
@Getter
@Setter
public class CertificateResponseDto {

    // Suggested file name for download (.cer for CSR-based, .p12 for server-generated keys).
    private String fileName;

    // Certificate (or PKCS#12 keystore) contents, Base64 encoded.
    private String fileContentsBase64;

}
| 236 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsController.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/controller/ApplicationsController.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.type.TypeFactory;
import com.hermesworld.ais.galapagos.applications.*;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ConfluentApiException;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthUtil;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.staging.Staging;
import com.hermesworld.ais.galapagos.staging.StagingResult;
import com.hermesworld.ais.galapagos.staging.StagingService;
import com.hermesworld.ais.galapagos.util.CertificateUtil;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.security.access.annotation.Secured;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.server.ResponseStatusException;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
/**
 * REST controller for application-related operations: listing known and registered applications, managing application
 * owner requests, registering applications on Kafka environments (client certificates or Confluent Cloud API keys),
 * and preparing / performing staging of changes between environments.
 * <p>
 * All asynchronous service results are awaited synchronously; {@link ExecutionException}s are translated to
 * appropriate HTTP status codes via {@link #handleExecutionException(ExecutionException, String)}.
 */
@RestController
@Slf4j
public class ApplicationsController {

    private final ApplicationsService applicationsService;

    private final StagingService stagingService;

    private final KafkaClusters kafkaClusters;

    public ApplicationsController(ApplicationsService applicationsService, StagingService stagingService,
            KafkaClusters kafkaClusters) {
        this.applicationsService = applicationsService;
        this.stagingService = stagingService;
        this.kafkaClusters = kafkaClusters;
    }

    /**
     * Lists all known applications, optionally excluding applications already associated with the current user.
     */
    @GetMapping(value = "/api/applications", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<KnownApplicationDto> listApplications(
            @RequestParam(required = false, defaultValue = "false") boolean excludeUserApps) {
        return applicationsService.getKnownApplications(excludeUserApps).stream().map(this::toKnownAppDto)
                .collect(Collectors.toList());
    }

    /**
     * Lists the applications of the current user.
     */
    @GetMapping(value = "/api/me/applications", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<KnownApplicationDto> listUserApplications() {
        return applicationsService.getUserApplications().stream().map(this::toKnownAppDto)
                .collect(Collectors.toList());
    }

    /**
     * Lists the application owner requests of the current user.
     */
    @GetMapping(value = "/api/me/requests", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<ApplicationOwnerRequest> getUserApplicationOwnerRequests() {
        return applicationsService.getUserApplicationOwnerRequests();
    }

    /**
     * Submits a new application owner request for the current user.
     *
     * @throws ResponseStatusException 400 if no application ID is given, 404 if the application is unknown.
     */
    @PutMapping(value = "/api/me/requests", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
    public ApplicationOwnerRequest submitApplicationOwnerRequest(
            @RequestBody ApplicationOwnerRequestSubmissionDto request) {
        if (!StringUtils.hasLength(request.getApplicationId())) {
            throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
                    "Required parameter applicationId is missing or empty");
        }
        applicationsService.getKnownApplication(request.getApplicationId())
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));
        try {
            return applicationsService.submitApplicationOwnerRequest(request.getApplicationId(), request.getComments())
                    .get();
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not submit application owner request: ");
        }
        catch (InterruptedException e) {
            // Restore the interrupt flag so callers further up the stack can react to it.
            Thread.currentThread().interrupt();
            return null;
        }
    }

    /**
     * Lists all applications registered on the given environment, as known application DTOs.
     */
    @GetMapping(value = "/api/registered-applications/{envId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<KnownApplicationDto> getRegisteredApplications(@PathVariable String envId) {
        return applicationsService.getAllApplicationMetadata(envId).stream()
                .map(app -> toKnownAppDto(applicationsService.getKnownApplication(app.getApplicationId()).orElse(null)))
                .filter(Objects::nonNull).collect(Collectors.toList());
    }

    /**
     * Lists all application owner requests (admin only).
     */
    @GetMapping(value = "/api/admin/requests", produces = MediaType.APPLICATION_JSON_VALUE)
    @Secured("ROLE_ADMIN")
    public List<ApplicationOwnerRequest> getAllApplicationOwnerRequests() {
        return applicationsService.getAllApplicationOwnerRequests();
    }

    /**
     * Updates the state of an application owner request (admin only), e.g. to approve or reject it.
     */
    @PostMapping(value = "/api/admin/requests/{id}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
    @Secured("ROLE_ADMIN")
    public ApplicationOwnerRequest updateApplicationOwnerRequest(@PathVariable String id,
            @RequestBody UpdateApplicationOwnerRequestDto updateData) {
        try {
            return applicationsService.updateApplicationOwnerRequest(id, updateData.getNewState()).get();
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not update application owner request: ");
        }
    }

    /**
     * Cancels an application owner request of the current user.
     *
     * @throws ResponseStatusException 404 if the request does not exist (or cancellation reports no success).
     */
    @DeleteMapping(value = "/api/me/requests/{id}")
    public void cancelApplicationOwnerRequest(@PathVariable String id) {
        try {
            Boolean b = applicationsService.cancelUserApplicationOwnerRequest(id).get();
            // Null-safe check avoids a NullPointerException from auto-unboxing if the service returns null.
            if (!Boolean.TRUE.equals(b)) {
                throw new ResponseStatusException(HttpStatus.NOT_FOUND);
            }
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not retrieve user's requests: ");
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Returns the naming prefixes the given application may use on the given environment.
     */
    @GetMapping(value = "/api/environments/{environmentId}/prefixes/{applicationId}")
    public ApplicationPrefixesDto getApplicationPrefixes(@PathVariable String environmentId,
            @PathVariable String applicationId) {
        return applicationsService.getApplicationMetadata(environmentId, applicationId)
                .map(this::toPrefixesDto)
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));
    }

    /**
     * Creates or updates the client certificate of an application on an environment, either from uploaded CSR data or
     * with a server-side generated key (the latter is rejected on the production environment).
     */
    @PostMapping(value = "/api/certificates/{applicationId}/{environmentId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
    public CertificateResponseDto updateApplicationCertificate(@PathVariable String applicationId,
            @PathVariable String environmentId, @RequestBody CertificateRequestDto request) {
        KnownApplication app = applicationsService.getKnownApplication(applicationId)
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));
        if (!applicationsService.isUserAuthorizedFor(applicationId)) {
            throw new ResponseStatusException(HttpStatus.FORBIDDEN);
        }
        kafkaClusters.getEnvironmentMetadata(environmentId)
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));

        String filename = CertificateUtil.toAppCn(app.getName()) + "_" + environmentId;
        if (!request.isGenerateKey()) {
            String csrData = request.getCsrData();
            if (ObjectUtils.isEmpty(csrData)) {
                throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
                        "No CSR (csrData) present! Set generateKey to true if you want the server to generate a private key for you (not recommended).");
            }
            filename += ".cer";
        }
        else {
            if (environmentId.equals(kafkaClusters.getProductionEnvironmentId())) {
                throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
                        "You cannot get a server-side generated key for production environments.");
            }
            filename += ".p12";
        }

        try {
            JSONObject requestParams = new JSONObject(JsonUtil.newObjectMapper().writeValueAsString(request));
            ByteArrayOutputStream cerOut = new ByteArrayOutputStream();
            applicationsService.registerApplicationOnEnvironment(environmentId, applicationId, requestParams, cerOut)
                    .get();
            CertificateResponseDto dto = new CertificateResponseDto();
            dto.setFileName(filename);
            dto.setFileContentsBase64(Base64.getEncoder().encodeToString(cerOut.toByteArray()));
            return dto;
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not create certificate: ");
        }
        catch (JsonProcessingException e) {
            throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "Invalid JSON body");
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
    }

    /**
     * Creates a Confluent Cloud API key for an application on an environment. The API secret is only returned once,
     * in the response body.
     */
    @PostMapping(value = "/api/apikeys/{applicationId}/{environmentId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
    public CreatedApiKeyDto createApiKeyForApplication(@PathVariable String environmentId,
            @PathVariable String applicationId, @RequestBody String request) {
        applicationsService.getKnownApplication(applicationId)
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));
        if (!applicationsService.isUserAuthorizedFor(applicationId)) {
            throw new ResponseStatusException(HttpStatus.FORBIDDEN);
        }
        kafkaClusters.getEnvironmentMetadata(environmentId)
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));

        try {
            // The API secret is written to the output stream by the registration; the key is part of the metadata.
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            JSONObject params = ObjectUtils.isEmpty(request) ? new JSONObject() : new JSONObject(request);
            ApplicationMetadata metadata = applicationsService
                    .registerApplicationOnEnvironment(environmentId, applicationId, params, baos).get();
            CreatedApiKeyDto dto = new CreatedApiKeyDto();
            dto.setApiKey(ConfluentCloudAuthUtil.getApiKey(metadata.getAuthenticationJson()));
            dto.setApiSecret(baos.toString(StandardCharsets.UTF_8));
            return dto;
        }
        catch (JSONException e) {
            throw new ResponseStatusException(HttpStatus.BAD_REQUEST);
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not create API Key: ");
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
    }

    /**
     * Lists the authentication information of the given application on every configured Kafka environment where it is
     * registered.
     */
    @GetMapping(value = "/api/authentications/{applicationId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public ApplicationAuthenticationsDto listApplicationAuthentications(@PathVariable String applicationId) {
        if (!applicationsService.isUserAuthorizedFor(applicationId)) {
            throw new ResponseStatusException(HttpStatus.FORBIDDEN);
        }

        Map<String, AuthenticationDto> authPerEnv = new HashMap<>();
        for (KafkaEnvironmentConfig env : kafkaClusters.getEnvironmentsMetadata()) {
            ApplicationMetadata metadata = applicationsService.getApplicationMetadata(env.getId(), applicationId)
                    .orElse(null);
            if (metadata != null && metadata.getAuthenticationJson() != null) {
                AuthenticationDto dto = new AuthenticationDto();
                dto.setAuthenticationType(env.getAuthenticationMode());
                dto.setAuthentication(new JSONObject(metadata.getAuthenticationJson()).toMap());
                authPerEnv.put(env.getId(), dto);
            }
        }

        ApplicationAuthenticationsDto result = new ApplicationAuthenticationsDto();
        result.setAuthentications(authPerEnv);
        return result;
    }

    /**
     * Prepares (but does not perform) a staging of the application's changes to the next environment, returning the
     * changes that would be applied.
     */
    @GetMapping(value = "/api/environments/{environmentId}/staging/{applicationId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public Staging describeStaging(@PathVariable String environmentId, @PathVariable String applicationId) {
        if (!applicationsService.isUserAuthorizedFor(applicationId)) {
            throw new ResponseStatusException(HttpStatus.FORBIDDEN);
        }
        try {
            return stagingService.prepareStaging(applicationId, environmentId, Collections.emptyList()).get();
        }
        catch (ExecutionException e) {
            // noinspection ThrowableNotThrown
            handleExecutionException(e, "Could not prepare staging: ");
            throw new ResponseStatusException(HttpStatus.BAD_REQUEST, e.getMessage());
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
    }

    /**
     * Performs a staging of the application's changes to the next environment. An optional JSON array of changes in the
     * request body restricts staging to exactly those changes.
     */
    @PostMapping(value = "/api/environments/{environmentId}/staging/{applicationId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<StagingResult> performStaging(@PathVariable String environmentId, @PathVariable String applicationId,
            @RequestBody String stagingFilterRaw) {
        if (!applicationsService.isUserAuthorizedFor(applicationId)) {
            throw new ResponseStatusException(HttpStatus.FORBIDDEN);
        }

        List<Change> stagingFilter = null;
        if (!ObjectUtils.isEmpty(stagingFilterRaw)) {
            try {
                stagingFilter = JsonUtil.newObjectMapper().readValue(stagingFilterRaw,
                        TypeFactory.defaultInstance().constructCollectionType(ArrayList.class, Change.class));
            }
            catch (IOException e) {
                throw new ResponseStatusException(HttpStatus.BAD_REQUEST);
            }
        }

        try {
            return stagingService.prepareStaging(applicationId, environmentId, stagingFilter).get().perform();
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e, "Could not prepare staging: ");
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Collections.emptyList();
        }
    }

    /**
     * Maps the cause of an ExecutionException to a ResponseStatusException with a fitting HTTP status. Unexpected
     * causes are logged and mapped to 500.
     */
    private ResponseStatusException handleExecutionException(ExecutionException e, String msgPrefix) {
        Throwable t = e.getCause();
        if (t instanceof CertificateException) {
            return new ResponseStatusException(HttpStatus.BAD_REQUEST, msgPrefix + t.getMessage());
        }
        if (t instanceof ConfluentApiException) {
            log.error("Encountered Confluent API Exception", t);
            return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR);
        }
        if (t instanceof NoSuchElementException) {
            return new ResponseStatusException(HttpStatus.NOT_FOUND);
        }
        if ((t instanceof IllegalStateException) || (t instanceof IllegalArgumentException)) {
            return new ResponseStatusException(HttpStatus.BAD_REQUEST, t.getMessage());
        }
        log.error("Unhandled exception in ApplicationsController", t);
        return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR);
    }

    /**
     * Converts a KnownApplication to its DTO representation; returns null for null input.
     */
    private KnownApplicationDto toKnownAppDto(KnownApplication app) {
        if (app == null) {
            return null;
        }
        return new KnownApplicationDto(app.getId(), app.getName(),
                app.getInfoUrl() == null ? null : app.getInfoUrl().toString(), toCapDtos(app.getBusinessCapabilities()),
                new ArrayList<>(app.getAliases()));
    }

    private List<BusinessCapabilityDto> toCapDtos(List<? extends BusinessCapability> caps) {
        return caps.stream().map(cap -> new BusinessCapabilityDto(cap.getId(), cap.getName()))
                .collect(Collectors.toList());
    }

    private ApplicationPrefixesDto toPrefixesDto(ApplicationPrefixes prefixes) {
        return new ApplicationPrefixesDto(prefixes.getInternalTopicPrefixes(), prefixes.getConsumerGroupPrefixes(),
                prefixes.getTransactionIdPrefixes());
    }
}
| 16,236 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KnownApplicationImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/impl/KnownApplicationImpl.java | package com.hermesworld.ais.galapagos.applications.impl;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.applications.BusinessCapability;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.extern.slf4j.Slf4j;
/**
 * Default implementation of {@link KnownApplication}, serialized to / deserialized from the internal
 * known-applications topic via Jackson. Identity is defined by the application ID (see equals/hashCode), while natural
 * ordering is by name - NOTE: this makes compareTo inconsistent with equals, so do not rely on ordered collections
 * that assume consistency (e.g. TreeSet) for de-duplication.
 */
@JsonSerialize
@Slf4j
public class KnownApplicationImpl implements KnownApplication, HasKey, Comparable<KnownApplicationImpl> {

    // Unique, stable ID of the application; also the repository key.
    private String id;

    // Human-readable name; may change over time for the same ID.
    private String name;

    // Optional aliases; null is treated as "no aliases" by the getter.
    private List<String> aliases;

    // Optional info URL, stored as string and parsed lazily in getInfoUrl().
    private String infoUrl;

    // Optional business capabilities; null is treated as "none" by the getter.
    private List<BusinessCapabilityImpl> businessCapabilities;

    @JsonCreator
    public KnownApplicationImpl(@JsonProperty(value = "id", required = true) String id,
            @JsonProperty(value = "name", required = true) String name) {
        if (id == null) {
            throw new IllegalArgumentException("id must not be null");
        }
        if (name == null) {
            throw new IllegalArgumentException("name must not be null");
        }
        this.id = id;
        this.name = name;
    }

    @Override
    public String key() {
        return id;
    }

    @Override
    public String getId() {
        return id;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public Set<String> getAliases() {
        // Defensive copy; never exposes the internal list.
        return this.aliases == null ? Collections.emptySet() : new HashSet<>(this.aliases);
    }

    public void setAliases(List<String> aliases) {
        this.aliases = aliases;
    }

    @Override
    public URL getInfoUrl() {
        try {
            return this.infoUrl == null ? null : new URL(this.infoUrl);
        }
        catch (MalformedURLException e) {
            // An invalid URL in the repository is tolerated (logged and treated as "no URL").
            log.warn("Invalid info URL found in galapagos.internal.known-applications topic: " + this.infoUrl, e);
            return null;
        }
    }

    public void setInfoUrl(String infoUrl) {
        this.infoUrl = infoUrl;
    }

    @Override
    public List<BusinessCapability> getBusinessCapabilities() {
        // Copy (and widen the element type) so callers cannot modify the internal list.
        return this.businessCapabilities == null ? Collections.emptyList()
                : this.businessCapabilities.stream().collect(Collectors.toList());
    }

    public void setBusinessCapabilities(List<BusinessCapabilityImpl> businessCapabilities) {
        this.businessCapabilities = businessCapabilities;
    }

    @Override
    public int compareTo(KnownApplicationImpl o) {
        // Orders by display name (case-insensitive); null compares as "smaller".
        if (o == null) {
            return 1;
        }
        return name.compareToIgnoreCase(o.name);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj == this) {
            return true;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        // Identity is based solely on the application ID.
        return id.equals(((KnownApplicationImpl) obj).id);
    }

    @Override
    public int hashCode() {
        return id.hashCode();
    }
}
| 3,265 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsServiceImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/impl/ApplicationsServiceImpl.java | package com.hermesworld.ais.galapagos.applications.impl;
import com.hermesworld.ais.galapagos.applications.*;
import com.hermesworld.ais.galapagos.events.GalapagosEventManager;
import com.hermesworld.ais.galapagos.events.GalapagosEventSink;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.util.InitPerCluster;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.util.TimeService;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONObject;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import java.io.IOException;
import java.io.OutputStream;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.stream.Collectors;
@Service
@Slf4j
public class ApplicationsServiceImpl implements ApplicationsService, InitPerCluster {
    private final KafkaClusters kafkaClusters;

    // Global (cross-environment) repository of all known applications.
    private final TopicBasedRepository<KnownApplicationImpl> knownApplicationsSource;

    // Global repository of all application owner requests.
    private final TopicBasedRepository<ApplicationOwnerRequest> requestsRepository;

    private final CurrentUserService currentUserService;

    private final TimeService timeService;

    private final NamingService namingService;

    private final GalapagosEventManager eventManager;

    // Name of the per-cluster repository topic holding ApplicationMetadata.
    private static final String TOPIC_NAME = "application-metadata";

    private static final String KNOWN_APPLICATIONS_TOPIC_NAME = "known-applications";

    private static final String REQUESTS_TOPIC_NAME = "application-owner-requests";

    // Orders requests by last status change, newest first.
    private static final Comparator<ApplicationOwnerRequest> requestComparator = (r1, r2) -> r2.getLastStatusChangeAt()
            .compareTo(r1.getLastStatusChangeAt());
public ApplicationsServiceImpl(KafkaClusters kafkaClusters, CurrentUserService currentUserService,
TimeService timeService, NamingService namingService, GalapagosEventManager eventManager) {
this.kafkaClusters = kafkaClusters;
this.knownApplicationsSource = kafkaClusters.getGlobalRepository(KNOWN_APPLICATIONS_TOPIC_NAME,
KnownApplicationImpl.class);
this.requestsRepository = kafkaClusters.getGlobalRepository(REQUESTS_TOPIC_NAME, ApplicationOwnerRequest.class);
this.currentUserService = currentUserService;
this.timeService = timeService;
this.namingService = namingService;
this.eventManager = eventManager;
}
@Override
public void init(KafkaCluster cluster) {
getRepository(cluster).getObjects();
}
@Override
public List<? extends KnownApplication> getKnownApplications(boolean excludeUserApps) {
List<? extends KnownApplication> apps = internalGetKnownApplications();
if (!excludeUserApps) {
return apps;
}
List<String> userApps = getUserApplicationsApprovedOrSubmitted().stream().map(KnownApplication::getId)
.collect(Collectors.toList());
return apps.stream().filter(app -> !userApps.contains(app.getId())).collect(Collectors.toList());
}
@Override
public Optional<KnownApplication> getKnownApplication(String applicationId) {
return internalGetKnownApplications().stream().filter(app -> applicationId.equals(app.getId())).findFirst();
}
@Override
public Optional<ApplicationMetadata> getApplicationMetadata(String environmentId, String applicationId) {
return kafkaClusters.getEnvironment(environmentId)
.flatMap(cluster -> getRepository(cluster).getObject(applicationId));
}
@Override
public CompletableFuture<ApplicationOwnerRequest> submitApplicationOwnerRequest(String applicationId,
String comments) {
String userName = currentUserService.getCurrentUserName().orElse(null);
if (userName == null) {
return noUser();
}
if (getKnownApplication(applicationId).isEmpty()) {
return unknownApplication(applicationId);
}
Optional<ApplicationOwnerRequest> existing = getRequestsRepository().getObjects().stream()
.filter(req -> userName.equals(req.getUserName()) && applicationId.equals(req.getApplicationId()))
.findAny();
if (existing.isPresent() && (existing.get().getState() == RequestState.SUBMITTED
|| existing.get().getState() == RequestState.APPROVED)) {
return CompletableFuture.completedFuture(existing.get());
}
ApplicationOwnerRequest request;
if (existing.isPresent()) {
request = existing.get();
}
else {
request = new ApplicationOwnerRequest();
request.setId(UUID.randomUUID().toString());
request.setCreatedAt(timeService.getTimestamp());
}
request.setApplicationId(applicationId);
request.setState(RequestState.SUBMITTED);
request.setUserName(userName);
request.setNotificationEmailAddress(currentUserService.getCurrentUserEmailAddress().orElse(null));
request.setComments(comments);
request.setLastStatusChangeAt(timeService.getTimestamp());
request.setLastStatusChangeBy(userName);
GalapagosEventSink eventSink = eventManager
.newEventSink(kafkaClusters.getEnvironment(kafkaClusters.getProductionEnvironmentId()).orElse(null));
return getRequestsRepository().save(request)
.thenCompose(o -> eventSink.handleApplicationOwnerRequestCreated(request)).thenApply(o -> request);
}
@Override
public List<ApplicationOwnerRequest> getUserApplicationOwnerRequests() {
String userName = currentUserService.getCurrentUserName().orElse(null);
if (userName == null) {
return Collections.emptyList();
}
return getRequestsRepository().getObjects().stream().filter(req -> userName.equals(req.getUserName()))
.sorted(requestComparator).collect(Collectors.toList());
}
@Override
public List<ApplicationOwnerRequest> getAllApplicationOwnerRequests() {
return getRequestsRepository().getObjects().stream().sorted(requestComparator).collect(Collectors.toList());
}
@Override
public CompletableFuture<ApplicationOwnerRequest> updateApplicationOwnerRequest(String requestId,
RequestState newState) {
String userName = currentUserService.getCurrentUserName().orElse(null);
if (userName == null) {
return noUser();
}
Optional<ApplicationOwnerRequest> opRequest = getRequestsRepository().getObjects().stream()
.filter(req -> requestId.equals(req.getId())).findFirst();
if (opRequest.isEmpty()) {
return unknownRequest(requestId);
}
ApplicationOwnerRequest request = opRequest.get();
request.setState(newState);
request.setLastStatusChangeAt(timeService.getTimestamp());
request.setLastStatusChangeBy(userName);
GalapagosEventSink eventSink = eventManager
.newEventSink(kafkaClusters.getEnvironment(kafkaClusters.getProductionEnvironmentId()).orElse(null));
return getRequestsRepository().save(request)
.thenCompose(o -> eventSink.handleApplicationOwnerRequestUpdated(request)).thenApply(o -> request);
}
@Override
public CompletableFuture<Boolean> cancelUserApplicationOwnerRequest(String requestId) throws IllegalStateException {
String userName = currentUserService.getCurrentUserName().orElse(null);
if (userName == null) {
return noUser();
}
Optional<ApplicationOwnerRequest> opRequest = getRequestsRepository().getObjects().stream()
.filter(req -> requestId.equals(req.getId())).findFirst();
if (opRequest.isEmpty()) {
return unknownRequest(requestId);
}
GalapagosEventSink eventSink = eventManager
.newEventSink(kafkaClusters.getEnvironment(kafkaClusters.getProductionEnvironmentId()).orElse(null));
ApplicationOwnerRequest request = opRequest.get();
if (request.getState() == RequestState.SUBMITTED) {
return getRequestsRepository().delete(request)
.thenCompose(o -> eventSink.handleApplicationOwnerRequestCanceled(request))
.thenApply(o -> Boolean.TRUE);
}
if (request.getState() == RequestState.APPROVED) {
request.setState(RequestState.RESIGNED);
request.setLastStatusChangeAt(timeService.getTimestamp());
request.setLastStatusChangeBy(userName);
return getRequestsRepository().save(request)
.thenCompose(o -> eventSink.handleApplicationOwnerRequestUpdated(request))
.thenApply(o -> Boolean.TRUE);
}
return CompletableFuture
.failedFuture(new IllegalStateException("May only cancel requests in state SUBMITTED or APPROVED"));
}
@Override
public List<? extends KnownApplication> getUserApplications() {
return getUserApplications(Set.of(RequestState.APPROVED));
}
@Override
public boolean isUserAuthorizedFor(String applicationId) {
return getUserApplicationOwnerRequests().stream().anyMatch(
req -> req.getState() == RequestState.APPROVED && applicationId.equals(req.getApplicationId()));
}
@Override
public CompletableFuture<ApplicationMetadata> registerApplicationOnEnvironment(String environmentId,
String applicationId, JSONObject registerParams, OutputStream outputStreamForSecret) {
KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(environmentId).orElse(null);
if (authModule == null) {
return unknownEnvironment(environmentId);
}
KafkaCluster kafkaCluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
if (kafkaCluster == null) {
return unknownEnvironment(environmentId);
}
KnownApplication knownApplication = getKnownApplication(applicationId).orElse(null);
if (knownApplication == null) {
return unknownApplication(applicationId);
}
String applicationName = namingService.normalize(knownApplication.getName());
ApplicationMetadata existing = getApplicationMetadata(environmentId, applicationId).orElse(null);
CompletableFuture<CreateAuthenticationResult> updateOrCreateFuture = null;
if (existing != null) {
String json = existing.getAuthenticationJson();
if (!ObjectUtils.isEmpty(json)) {
updateOrCreateFuture = authModule.updateApplicationAuthentication(applicationId, applicationName,
registerParams, new JSONObject(json));
}
}
if (updateOrCreateFuture == null) {
updateOrCreateFuture = authModule.createApplicationAuthentication(applicationId, applicationName,
registerParams);
}
GalapagosEventSink eventSink = eventManager.newEventSink(kafkaCluster);
JSONObject oldAuthentication = existing != null && StringUtils.hasLength(existing.getAuthenticationJson())
? new JSONObject(existing.getAuthenticationJson())
: new JSONObject();
return updateOrCreateFuture
.thenCompose(result -> updateApplicationMetadata(kafkaCluster, knownApplication, existing, result)
.thenCompose(meta -> futureWrite(outputStreamForSecret, result.getPrivateAuthenticationData())
.thenApply(o -> meta)))
.thenCompose(meta -> {
if (existing != null) {
return eventSink.handleApplicationAuthenticationChanged(meta, oldAuthentication,
new JSONObject(meta.getAuthenticationJson())).thenApply(o -> meta);
}
return eventSink.handleApplicationRegistered(meta).thenApply(o -> meta);
});
}
@Override
public CompletableFuture<ApplicationMetadata> resetApplicationPrefixes(String environmentId, String applicationId) {
KafkaCluster cluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
if (cluster == null) {
return unknownEnvironment(environmentId);
}
KnownApplication app = getKnownApplication(applicationId).orElse(null);
if (app == null) {
return unknownApplication(applicationId);
}
return getApplicationMetadata(environmentId, applicationId).map(existing -> {
ApplicationMetadata newMetadata = new ApplicationMetadata(existing);
ApplicationPrefixes newPrefixes = namingService.getAllowedPrefixes(app);
newMetadata.setInternalTopicPrefixes(newPrefixes.getInternalTopicPrefixes());
newMetadata.setConsumerGroupPrefixes(newPrefixes.getConsumerGroupPrefixes());
newMetadata.setTransactionIdPrefixes(newPrefixes.getTransactionIdPrefixes());
return getRepository(cluster).save(newMetadata).thenApply(o -> newMetadata);
}).orElseGet(() -> CompletableFuture.failedFuture(new NoSuchElementException()));
}
private List<KnownApplication> internalGetKnownApplications() {
return knownApplicationsSource.getObjects().stream().sorted().collect(Collectors.toList());
}
private CompletableFuture<ApplicationMetadata> updateApplicationMetadata(KafkaCluster kafkaCluster,
KnownApplication application, ApplicationMetadata existingMetadata,
CreateAuthenticationResult authenticationResult) {
ApplicationMetadata newMetadata = new ApplicationMetadata();
ApplicationPrefixes prefixes = namingService.getAllowedPrefixes(application);
for (String environmentId : kafkaClusters.getEnvironmentIds()) {
ApplicationMetadata metadata = getApplicationMetadata(environmentId, application.getId()).orElse(null);
if (metadata != null) {
prefixes = prefixes.combineWith(metadata);
}
}
if (existingMetadata != null) {
prefixes = prefixes.combineWith(existingMetadata);
}
newMetadata.setApplicationId(application.getId());
newMetadata.setInternalTopicPrefixes(prefixes.getInternalTopicPrefixes());
newMetadata.setConsumerGroupPrefixes(prefixes.getConsumerGroupPrefixes());
newMetadata.setTransactionIdPrefixes(prefixes.getTransactionIdPrefixes());
newMetadata.setAuthenticationJson(authenticationResult.getPublicAuthenticationData().toString());
return getRepository(kafkaCluster).save(newMetadata).thenApply(o -> newMetadata);
}
private CompletableFuture<Void> futureWrite(OutputStream os, byte[] data) {
try {
os.write(data);
return CompletableFuture.completedFuture(null);
}
catch (IOException e) {
return CompletableFuture.failedFuture(e);
}
}
@Scheduled(initialDelay = 30000, fixedDelayString = "PT6H")
void removeOldRequests() {
log.debug("removeOldRequests() by scheduler");
ZonedDateTime maxAge = timeService.getTimestamp().minusDays(30);
List<ApplicationOwnerRequest> oldRequests = getRequestsRepository().getObjects().stream()
.filter(req -> (req.getState() == RequestState.REJECTED || req.getState() == RequestState.REVOKED
|| req.getState() == RequestState.RESIGNED) && req.getLastStatusChangeAt().isBefore(maxAge))
.collect(Collectors.toList());
for (ApplicationOwnerRequest request : oldRequests) {
log.info("Removing request for user " + request.getUserName() + " and application "
+ request.getApplicationId() + " because it rejects application access and is older than 30 days");
getRequestsRepository().delete(request);
}
}
private TopicBasedRepository<ApplicationMetadata> getRepository(KafkaCluster kafkaCluster) {
return kafkaCluster.getRepository(TOPIC_NAME, ApplicationMetadata.class);
}
private TopicBasedRepository<ApplicationOwnerRequest> getRequestsRepository() {
return requestsRepository;
}
private List<? extends KnownApplication> getUserApplicationsApprovedOrSubmitted() {
return getUserApplications(Set.of(RequestState.APPROVED, RequestState.SUBMITTED));
}
private List<? extends KnownApplication> getUserApplications(Set<RequestState> requestStates) {
String userName = currentUserService.getCurrentUserName()
.orElseThrow(() -> new IllegalStateException("No currently logged in user"));
Map<String, KnownApplication> apps = internalGetKnownApplications().stream()
.collect(Collectors.toMap(KnownApplication::getId, Function.identity()));
return getRequestsRepository().getObjects().stream()
.filter(req -> requestStates.contains(req.getState()) && userName.equals(req.getUserName()))
.map(req -> apps.get(req.getApplicationId())).filter(Objects::nonNull).collect(Collectors.toList());
}
private static <T> CompletableFuture<T> noUser() {
return CompletableFuture.failedFuture(new IllegalStateException("No user currently logged in"));
}
private static <T> CompletableFuture<T> unknownApplication(String applicationId) {
return CompletableFuture.failedFuture(new NoSuchElementException("Unknown application ID: " + applicationId));
}
private static <T> CompletableFuture<T> unknownRequest(String requestId) {
return CompletableFuture.failedFuture(new NoSuchElementException("Unknown request ID: " + requestId));
}
private static <T> CompletableFuture<T> unknownEnvironment(String environmentId) {
return CompletableFuture
.failedFuture(new NoSuchElementException("Unknown Kafka environment: " + environmentId));
}
}
| 18,723 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
UpdateApplicationAclsListener.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/impl/UpdateApplicationAclsListener.java | package com.hermesworld.ais.galapagos.applications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.thymeleaf.util.StringUtils;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@Component
public class UpdateApplicationAclsListener
        implements TopicEventsListener, ApplicationEventsListener, SubscriptionEventsListener {
    /**
     * Event listener which keeps the Kafka ACLs of applications in sync with Galapagos metadata. Whenever topics,
     * subscriptions, producers, or application registrations change, the ACLs of the affected application(s) are
     * recalculated (via AclSupport) and written to the cluster.
     */
    private final KafkaClusters kafkaClusters;
    private final SubscriptionService subscriptionService;
    private final ApplicationsService applicationsService;
    // Calculates the full set of ACL bindings required for an application on an environment.
    private final AclSupport aclSupport;
    public UpdateApplicationAclsListener(KafkaClusters kafkaClusters, SubscriptionService subscriptionService,
            ApplicationsService applicationsService, AclSupport aclSupport) {
        this.kafkaClusters = kafkaClusters;
        this.subscriptionService = subscriptionService;
        this.applicationsService = applicationsService;
        this.aclSupport = aclSupport;
    }
    // Recalculates the ACLs of the subscribing (client) application; no-op if it is not registered on the cluster.
    @Override
    public CompletableFuture<Void> handleSubscriptionCreated(SubscriptionEvent event) {
        return applicationsService
                .getApplicationMetadata(getCluster(event).getId(), event.getMetadata().getClientApplicationId())
                .map(metadata -> updateApplicationAcls(getCluster(event), metadata)).orElse(FutureUtil.noop());
    }
    @Override
    public CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionEvent event) {
        // same implementation :-)
        return handleSubscriptionCreated(event);
    }
    @Override
    public CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionEvent event) {
        // same implementation :-)
        return handleSubscriptionCreated(event);
    }
    @Override
    public CompletableFuture<Void> handleApplicationRegistered(ApplicationEvent event) {
        return updateApplicationAcls(getCluster(event), event.getMetadata());
    }
    /**
     * Handles a change of an application's authentication (e.g. new certificate / credentials). If the Kafka user
     * name stays the same, the cluster's ACL update handles the delta; otherwise, ACLs are written for the new user
     * and afterwards removed for the previous user.
     */
    @Override
    public CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationAuthenticationChangeEvent event) {
        // early check here because of the potential removal of ACLs below
        if (shallSkipUpdateAcls(getCluster(event))) {
            return FutureUtil.noop();
        }
        // reconstruct the application's previous state from the old authentication JSON
        ApplicationMetadata prevMetadata = new ApplicationMetadata(event.getMetadata());
        prevMetadata.setAuthenticationJson(event.getOldAuthentication().toString());
        ApplicationUser newUser = new ApplicationUser(event);
        ApplicationUser prevUser = new ApplicationUser(prevMetadata, getCluster(event).getId());
        if ((newUser.getKafkaUserName() != null && newUser.getKafkaUserName().equals(prevUser.getKafkaUserName()))
                || prevUser.getKafkaUserName() == null) {
            // Cluster implementation will deal about ACL delta
            return updateApplicationAcls(getCluster(event), event.getMetadata());
        }
        else {
            // user name changed: add ACLs for the new user, then drop the old user's ACLs
            return updateApplicationAcls(getCluster(event), event.getMetadata())
                    .thenCompose(o -> getCluster(event).removeUserAcls(prevUser));
        }
    }
    // Owner request events do not affect ACLs.
    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequestEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequestEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequestEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) {
        // same implementation :-)
        return handleTopicDeleted(event);
    }
    // Recalculates the owner application's ACLs; internal topics are covered by prefix ACLs, so they are skipped.
    @Override
    public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) {
        if (event.getMetadata().getType() == TopicType.INTERNAL) {
            return FutureUtil.noop();
        }
        return applicationsService
                .getApplicationMetadata(getCluster(event).getId(), event.getMetadata().getOwnerApplicationId())
                .map(metadata -> updateApplicationAcls(getCluster(event), metadata)).orElse(FutureUtil.noop());
    }
    /**
     * When the "subscription approval required" flag of a topic changes, the ACLs of ALL applications with a
     * (non-deleted, including pending) subscription to the topic are updated, sequentially chained.
     */
    @Override
    public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) {
        KafkaCluster cluster = getCluster(event);
        Set<String> applicationIds = subscriptionService
                .getSubscriptionsForTopic(cluster.getId(), event.getMetadata().getName(), true).stream()
                .map(SubscriptionMetadata::getClientApplicationId).collect(Collectors.toSet());
        CompletableFuture<Void> result = FutureUtil.noop();
        for (String appId : applicationIds) {
            ApplicationMetadata appMeta = applicationsService.getApplicationMetadata(cluster.getId(), appId)
                    .orElse(null);
            if (appMeta != null) {
                result = result.thenCompose(o -> updateApplicationAcls(cluster, appMeta));
            }
        }
        return result;
    }
    // A new producer gains write access: recalculate the producer application's ACLs.
    @Override
    public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) {
        KafkaCluster cluster = getCluster(event);
        return applicationsService.getApplicationMetadata(cluster.getId(), event.getProducerApplicationId())
                .map(metadata -> updateApplicationAcls(getCluster(event), metadata)).orElse(FutureUtil.noop());
    }
    @Override
    public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) {
        return applicationsService.getApplicationMetadata(getCluster(event).getId(), event.getProducerApplicationId())
                .map(metadata -> updateApplicationAcls(getCluster(event), metadata)).orElse(FutureUtil.noop());
    }
    // The remaining topic events have no ACL impact.
    @Override
    public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) {
        return FutureUtil.noop();
    }
    @Override
    public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) {
        return FutureUtil.noop();
    }
    /**
     * Allows external access to the ACL logic for applications, which is quite complex. Currently known user is the
     * Update Listener of the Dev Certificates (DevUserAclListener).
     *
     * @param metadata Metadata of the application
     * @param environmentId Environment for which the ACLs are needed.
     *
     * @return A KafkaUser object which can be queried for its ACLs.
     */
    public KafkaUser getApplicationUser(ApplicationMetadata metadata, String environmentId) {
        return new ApplicationUser(metadata, environmentId);
    }
    private KafkaCluster getCluster(AbstractGalapagosEvent event) {
        return event.getContext().getKafkaCluster();
    }
    // True if ACL updates are disabled for this environment via configuration (noUpdateApplicationAcls).
    private boolean shallSkipUpdateAcls(KafkaCluster cluster) {
        return kafkaClusters.getEnvironmentMetadata(cluster.getId()).map(config -> config.isNoUpdateApplicationAcls())
                .orElse(false);
    }
    // Writes the application's currently required ACLs to the cluster, unless disabled by configuration.
    private CompletableFuture<Void> updateApplicationAcls(KafkaCluster cluster, ApplicationMetadata metadata) {
        if (shallSkipUpdateAcls(cluster)) {
            return FutureUtil.noop();
        }
        return cluster.updateUserAcls(new ApplicationUser(metadata, cluster.getId()));
    }
    /**
     * Adapter presenting an application (on one environment) as a KafkaUser, i.e. a Kafka user name plus the set of
     * ACL bindings required for it.
     */
    private class ApplicationUser implements KafkaUser {
        private final ApplicationMetadata metadata;
        private final String environmentId;
        public ApplicationUser(ApplicationEvent event) {
            this(event.getMetadata(), event.getContext().getKafkaCluster().getId());
        }
        public ApplicationUser(ApplicationMetadata metadata, String environmentId) {
            this.metadata = metadata;
            this.environmentId = environmentId;
        }
        /**
         * Extracts the Kafka user name from the application's authentication JSON via the environment's
         * authentication module. Returns null (and logs a warning) if the JSON cannot be parsed, or null if no
         * authentication module is configured for the environment.
         */
        @Override
        public String getKafkaUserName() {
            JSONObject authData;
            try {
                authData = new JSONObject(metadata.getAuthenticationJson());
                return kafkaClusters.getAuthenticationModule(environmentId).map(m -> m.extractKafkaUserName(authData))
                        .orElse(null);
            }
            catch (JSONException e) {
                LoggerFactory.getLogger(UpdateApplicationAclsListener.class)
                        .warn("Could not parse authentication JSON of application {}", metadata.getApplicationId(), e);
                return null;
            }
        }
        @Override
        public Collection<AclBinding> getRequiredAclBindings() {
            return aclSupport.getRequiredAclBindings(environmentId, metadata, getKafkaUserName(), false);
        }
    }
}
| 10,533 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
BusinessCapabilityImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/applications/impl/BusinessCapabilityImpl.java | package com.hermesworld.ais.galapagos.applications.impl;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.applications.BusinessCapability;
/**
 * JSON-serializable default implementation of {@link BusinessCapability}, consisting of an ID and a display name.
 * Instances are immutable; both properties are required when deserializing from JSON.
 */
@JsonSerialize
public class BusinessCapabilityImpl implements BusinessCapability {
    // final: this is a pure value object, both properties are set once in the constructor
    private final String id;
    private final String name;
    /**
     * Creates a new business capability.
     *
     * @param id   Unique ID of the business capability (required).
     * @param name Display name of the business capability (required).
     */
    @JsonCreator
    public BusinessCapabilityImpl(@JsonProperty(value = "id", required = true) String id,
            @JsonProperty(value = "name", required = true) String name) {
        this.id = id;
        this.name = name;
    }
    @Override
    public String getId() {
        return id;
    }
    @Override
    public String getName() {
        return name;
    }
}
| 812 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
HasKey.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/HasKey.java | package com.hermesworld.ais.galapagos.util;
/**
 * Interface for objects providing a unique key, e.g. for being stored in key-based (topic-backed) repositories.
 */
public interface HasKey {
    /**
     * Returns the unique key of this object.
     *
     * @return The unique key of this object.
     */
    // note: interface members are implicitly public; the redundant modifier has been removed
    String key();
}
| 100 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
FutureUtil.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/FutureUtil.java | package com.hermesworld.ais.galapagos.util;
import java.util.NoSuchElementException;
import java.util.concurrent.CompletableFuture;
/**
 * Utility class providing ready-made {@link CompletableFuture} instances for frequently occurring outcomes
 * (no-op success, "no user logged in" failure, "unknown environment" failure).
 */
public final class FutureUtil {
    // single shared, already-completed Void future returned by noop()
    private static final CompletableFuture<Void> COMPLETED_VOID = CompletableFuture.completedFuture(null);
    private FutureUtil() {
        // static utility class - no instances
    }
    /**
     * Returns a future failed with an {@link IllegalStateException} indicating that a logged-in user is required.
     */
    public static <T> CompletableFuture<T> noUser() {
        IllegalStateException cause = new IllegalStateException("A user must be logged in for this operation.");
        return CompletableFuture.failedFuture(cause);
    }
    /**
     * Returns a future failed with a {@link NoSuchElementException} for the given unknown environment ID.
     */
    public static <T> CompletableFuture<T> noSuchEnvironment(String environmentId) {
        String message = "No environment with ID " + environmentId + " found.";
        return CompletableFuture.failedFuture(new NoSuchElementException(message));
    }
    /**
     * Returns an already-completed Void future (shared instance).
     */
    public static CompletableFuture<Void> noop() {
        return COMPLETED_VOID;
    }
}
| 819 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TimeService.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/TimeService.java | package com.hermesworld.ais.galapagos.util;
import java.time.ZonedDateTime;
/**
 * Component interface to retrieve the current timestamp. All components should use this interface to get a timestamp
 * instead of directly calling {@link ZonedDateTime#now()}, to enable unit tests which replace this component with a
 * mock (fixed or controllable clock).
 *
 * @author AlbrechtFlo
 *
 */
public interface TimeService {
    /**
     * Returns the current timestamp, including time zone information.
     *
     * @return The current timestamp.
     */
    ZonedDateTime getTimestamp();
}
| 425 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
JsonUtil.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/JsonUtil.java | package com.hermesworld.ais.galapagos.util;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.changes.impl.ChangeDeserizalizer;
/**
 * Utility class providing a preconfigured Jackson {@link ObjectMapper} for use throughout Galapagos.
 */
public final class JsonUtil {
    private JsonUtil() {
    }
    /**
     * Creates a new ObjectMapper configured with: JDK8 types (Optional etc.), java.time types serialized as ISO
     * strings (not numeric timestamps), and a custom deserializer for {@link Change} objects.
     *
     * @return A freshly created, configured ObjectMapper.
     */
    public static ObjectMapper newObjectMapper() {
        ObjectMapper mapper = new ObjectMapper();
        mapper.registerModule(new Jdk8Module()).registerModule(new JavaTimeModule());
        // write dates as ISO-8601 strings instead of numeric timestamps
        mapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
        // Change is polymorphic; the custom deserializer selects the concrete subtype
        SimpleModule module = new SimpleModule("change-deserializer", Version.unknownVersion());
        module.addDeserializer(Change.class, new ChangeDeserizalizer());
        mapper.registerModule(module);
        return mapper;
    }
}
| 1,074 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CertificateUtil.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/CertificateUtil.java | package com.hermesworld.ais.galapagos.util;
import org.bouncycastle.asn1.ASN1Encoding;
import org.bouncycastle.asn1.DERBMPString;
import org.bouncycastle.asn1.DERUTF8String;
import org.bouncycastle.asn1.oiw.OIWObjectIdentifiers;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
import org.bouncycastle.asn1.x500.AttributeTypeAndValue;
import org.bouncycastle.asn1.x500.RDN;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.SubjectKeyIdentifier;
import org.bouncycastle.asn1.x509.X509ObjectIdentifiers;
import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils;
import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.OperatorCreationException;
import org.bouncycastle.operator.OutputEncryptor;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.pkcs.*;
import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
import org.bouncycastle.pkcs.jcajce.JcaPKCS12SafeBagBuilder;
import org.bouncycastle.pkcs.jcajce.JcePKCS12MacCalculatorBuilder;
import org.bouncycastle.pkcs.jcajce.JcePKCSPBEOutputEncryptorBuilder;
import java.io.IOException;
import java.io.StringWriter;
import java.security.*;
import java.security.cert.X509Certificate;
import java.util.*;
public final class CertificateUtil {
private static final Random RANDOM = new Random();
private CertificateUtil() {
}
public static String toAppCn(String appName) {
String name = appName.toLowerCase(Locale.US);
name = name.replaceAll("[^0-9a-zA-Z]", "_");
while (name.contains("__")) {
name = name.replace("__", "_");
}
if (name.startsWith("_")) {
name = name.substring(1);
}
if (name.endsWith("_")) {
name = name.substring(0, name.length() - 1);
}
return name;
}
public static String extractCn(String dn) {
return extractCn(new X500Name(dn));
}
public static String extractCn(X500Name name) {
return getCn(name.getRDNs());
}
public static PKCS10CertificationRequest buildCsr(X500Name subject, KeyPair keyPair)
throws OperatorCreationException {
PKCS10CertificationRequestBuilder csrBuilder = new JcaPKCS10CertificationRequestBuilder(subject,
keyPair.getPublic());
JcaContentSignerBuilder csBuilder = new JcaContentSignerBuilder("SHA256withRSA");
ContentSigner signer = csBuilder.build(keyPair.getPrivate());
return csrBuilder.build(signer);
}
public static byte[] buildPrivateKeyStore(X509Certificate publicCertificate, PrivateKey privateKey, char[] password)
throws IOException, PKCSException, NoSuchAlgorithmException, OperatorCreationException {
PKCS12PfxPduBuilder keyStoreBuilder = new PKCS12PfxPduBuilder();
JcaX509ExtensionUtils extUtils = new JcaX509ExtensionUtils();
SubjectKeyIdentifier pubKeyId = extUtils.createSubjectKeyIdentifier(publicCertificate.getPublicKey());
String cn = CertificateUtil.extractCn(publicCertificate.getSubjectX500Principal().getName());
PKCS12SafeBagBuilder certBagBuilder = new JcaPKCS12SafeBagBuilder(publicCertificate);
certBagBuilder.addBagAttribute(PKCS12SafeBag.friendlyNameAttribute, new DERBMPString(cn));
certBagBuilder.addBagAttribute(PKCS12SafeBag.localKeyIdAttribute, pubKeyId);
keyStoreBuilder
.addEncryptedData(new JcePKCSPBEOutputEncryptorBuilder(PKCSObjectIdentifiers.pbeWithSHAAnd128BitRC2_CBC)
.setProvider("BC").build(password), certBagBuilder.build());
OutputEncryptor encOut = new JcePKCSPBEOutputEncryptorBuilder(PKCSObjectIdentifiers.pbeWithSHAAnd128BitRC2_CBC)
.setProvider("BC").build(password);
PKCS12SafeBagBuilder keyBagBuilder = new JcaPKCS12SafeBagBuilder(privateKey, encOut);
keyBagBuilder.addBagAttribute(PKCS12SafeBag.friendlyNameAttribute, new DERBMPString(cn));
keyBagBuilder.addBagAttribute(PKCS12SafeBag.localKeyIdAttribute, pubKeyId);
keyStoreBuilder.addData(keyBagBuilder.build());
byte[] pkcs12Bytes = keyStoreBuilder
.build(new JcePKCS12MacCalculatorBuilder(OIWObjectIdentifiers.idSHA1), password)
.getEncoded(ASN1Encoding.DL);
Arrays.fill(password, '*');
return pkcs12Bytes;
}
public static KeyPair generateKeyPair() throws GeneralSecurityException {
KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA", "BC");
gen.initialize(2048);
return gen.generateKeyPair();
}
public static X500Name uniqueX500Name(String cn) {
String ouValue = "certification_" + Integer.toHexString(RANDOM.nextInt());
List<RDN> rdns = new ArrayList<>();
rdns.add(new RDN(
new AttributeTypeAndValue(X509ObjectIdentifiers.organizationalUnitName, new DERUTF8String(ouValue))));
rdns.add(new RDN(new AttributeTypeAndValue(X509ObjectIdentifiers.commonName, new DERUTF8String(cn))));
return new X500Name(rdns.toArray(new RDN[0]));
}
public static String toPemString(PKCS10CertificationRequest request) {
StringWriter sw = new StringWriter();
JcaPEMWriter writer = new JcaPEMWriter(sw);
try {
writer.writeObject(request);
writer.flush();
return sw.toString();
}
catch (IOException e) {
// must not occur in memory
throw new RuntimeException(e);
}
}
private static String getCn(RDN[] rdns) {
Optional<String> opValue = Arrays.stream(rdns).filter(rdn -> isCn(rdn))
.map(rdn -> rdn.getFirst().getValue().toString()).findFirst();
return opValue.orElse(null);
}
    /**
     * Returns <code>true</code> if the first attribute of the given RDN is the X.509 Common Name (CN) attribute.
     */
    private static boolean isCn(RDN rdn) {
        return X509ObjectIdentifiers.commonName.equals(rdn.getFirst().getType());
    }
}
| 6,005 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ZipUtil.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/ZipUtil.java | package com.hermesworld.ais.galapagos.util;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/**
 * Utility class for building ZIP archives in memory.
 */
public final class ZipUtil {

    private ZipUtil() {
    }

    /**
     * Creates an in-memory ZIP archive containing the given files.
     *
     * @param fileNames    Names of the ZIP entries to create, in order.
     * @param fileContents Contents of the entries; must have the same length as <code>fileNames</code>.
     * @return The bytes of the resulting ZIP archive.
     * @throws IllegalArgumentException If the two arrays differ in length.
     */
    public static byte[] zipFiles(String[] fileNames, byte[][] fileContents) {
        if (fileNames.length != fileContents.length) {
            throw new IllegalArgumentException("fileNames and fileContents must have the same length");
        }
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        // try-with-resources ensures the ZIP stream is closed (and its central directory written) in all cases,
        // also when a write fails; the previous version leaked the stream on exceptions.
        try (ZipOutputStream zip = new ZipOutputStream(baos)) {
            for (int i = 0; i < fileNames.length; i++) {
                zip.putNextEntry(new ZipEntry(fileNames[i]));
                zip.write(fileContents[i]);
                zip.closeEntry();
            }
        }
        catch (IOException e) {
            // should not occur for memory-only operations
            throw new RuntimeException(e);
        }
        return baos.toByteArray();
    }
}
| 932 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
BackupController.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/controller/BackupController.java | package com.hermesworld.ais.galapagos.util.controller;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.security.access.annotation.Secured;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ResponseStatusException;
@RestController
@Slf4j
public class BackupController {

    private final KafkaClusters kafkaClusters;

    private final ObjectMapper objectMapper = JsonUtil.newObjectMapper();

    public BackupController(KafkaClusters kafkaClusters) {
        this.kafkaClusters = kafkaClusters;
    }

    /**
     * Serializes the contents of all metadata repositories of all configured Kafka environments into one JSON
     * document. Admin-only endpoint.
     */
    @GetMapping(value = "/api/admin/full-backup", produces = MediaType.APPLICATION_JSON_VALUE)
    @Secured("ROLE_ADMIN")
    public String createBackup() {
        JSONObject result = new JSONObject();
        kafkaClusters.getEnvironmentIds()
                .forEach(id -> kafkaClusters.getEnvironment(id).ifPresent(env -> result.put(id, backupEnvironment(env))));
        return result.toString();
    }

    // One JSON object per metadata repository of the given cluster, keyed by topic name.
    private JSONObject backupEnvironment(KafkaCluster cluster) {
        JSONObject result = new JSONObject();
        cluster.getRepositories().forEach(repo -> result.put(repo.getTopicName(),
                backupTopicData(cluster.getRepository(repo.getTopicName(), repo.getValueClass()))));
        return result;
    }

    // Serializes every object of the repository as JSON, keyed by the object's key.
    private JSONObject backupTopicData(TopicBasedRepository<? extends HasKey> repo) {
        JSONObject result = new JSONObject();
        for (HasKey obj : repo.getObjects()) {
            try {
                result.put(obj.key(), new JSONObject(objectMapper.writeValueAsString(obj)));
            }
            catch (JSONException | JsonProcessingException e) {
                log.error("Could not serialize object for backup", e);
                throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, e.getMessage());
            }
        }
        return result;
    }
}
| 2,590 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TimeServiceImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/impl/TimeServiceImpl.java | package com.hermesworld.ais.galapagos.util.impl;
import java.time.ZonedDateTime;
import org.springframework.stereotype.Component;
import com.hermesworld.ais.galapagos.util.TimeService;
@Component
public class TimeServiceImpl implements TimeService {

    /**
     * Returns the current wall-clock time in the JVM default time zone. Placed behind the {@link TimeService}
     * interface — presumably so tests can substitute a fixed timestamp (TODO confirm).
     */
    @Override
    public ZonedDateTime getTimestamp() {
        return ZonedDateTime.now();
    }

}
| 356 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
StartupRepositoryInitializer.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/util/impl/StartupRepositoryInitializer.java | package com.hermesworld.ais.galapagos.util.impl;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.util.InitPerCluster;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;
/**
 * Due to the lazy and asynchronous nature of the topic-based repositories in the service implementations, the
 * implementations can implement {@link InitPerCluster}. If they do, they will be called by this service once the
 * ApplicationContext has started. Their <code>init()</code> method will be called for each known Kafka Cluster, so they
 * can e.g. initialize their repositories.
 *
 * @author AlbrechtFlo
 *
 */
@Component
@Slf4j
public class StartupRepositoryInitializer {

    private final KafkaClusters kafkaClusters;

    // Maximum time to wait for the first record of each repository topic (configurable).
    private final Duration initialRepositoryLoadWaitTime;

    // Idle time after the last received record before a repository is considered fully loaded (configurable).
    private final Duration repositoryLoadIdleTime;

    public StartupRepositoryInitializer(KafkaClusters kafkaClusters,
            @Value("${galapagos.initialRepositoryLoadWaitTime:5s}") Duration initialRepositoryLoadWaitTime,
            @Value("${galapagos.repositoryLoadIdleTime:2s}") Duration repositoryLoadIdleTime) {
        this.kafkaClusters = kafkaClusters;
        this.initialRepositoryLoadWaitTime = initialRepositoryLoadWaitTime;
        this.repositoryLoadIdleTime = repositoryLoadIdleTime;
    }

    /**
     * Calls {@link InitPerCluster#init(KafkaCluster)} on every bean implementing the interface, once per configured
     * Kafka cluster, and then blocks until all topic-based repositories of all clusters have completed their initial
     * load (or the configured wait times have elapsed).
     *
     * @param event Spring context refresh event, used to look up all {@link InitPerCluster} beans.
     */
    @EventListener
    public void initializePerCluster(ContextRefreshedEvent event) {
        Collection<InitPerCluster> beans = event.getApplicationContext().getBeansOfType(InitPerCluster.class).values();
        ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1);
        List<CompletableFuture<?>> futures = new ArrayList<>();
        log.info("Waiting for Galapagos Metadata repositories to be initialized...");
        try {
            for (String id : kafkaClusters.getEnvironmentIds()) {
                KafkaCluster cluster = kafkaClusters.getEnvironment(id).orElse(null);
                if (cluster != null) {
                    beans.forEach(bean -> bean.init(cluster));
                    cluster.getRepositories().stream().map(r -> r.waitForInitialization(initialRepositoryLoadWaitTime,
                            repositoryLoadIdleTime, executorService)).forEach(futures::add);
                }
            }
            for (CompletableFuture<?> future : futures) {
                try {
                    future.get();
                }
                catch (InterruptedException e) {
                    // FIX: restore the interrupt status instead of silently swallowing it, so callers
                    // (and the finally block below) can observe the interruption.
                    Thread.currentThread().interrupt();
                    return;
                }
                catch (ExecutionException e) {
                    log.error("Exception when waiting for Kafka repository initialization", e);
                }
            }
        }
        finally {
            executorService.shutdown();
            try {
                executorService.awaitTermination(1, TimeUnit.MINUTES);
            }
            catch (InterruptedException e) {
                log.warn("Thread has been interrupted while waiting for executor shutdown", e);
                // FIX: re-interrupt here as well, for the same reason as above.
                Thread.currentThread().interrupt();
            }
        }
    }

}
| 3,449 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaExecutorFactory.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/KafkaExecutorFactory.java | package com.hermesworld.ais.galapagos.kafka;
import java.util.concurrent.ExecutorService;
/**
* Interface of a factory for Executors which will be used for decoupling Kafka calls from the Kafka Thread. This is
* required for proper concatenation of <code>CompletableFuture</code>s, as otherwise, a second Kafka invocation after a
* first one could cause a deadlock within the Kafka Thread. <br>
* One Executor is created (and immediately shut down) for each Kafka Action which must be decoupled. <br>
* Implementations of this interface can e.g. provide special executors which deal with Thread-local static accessor
* classes like Spring Security's <code>SecurityContextHolder</code>.
*
* @author AlbrechtFlo
*
*/
public interface KafkaExecutorFactory {

    /**
     * Creates a new executor service for decoupling a single Kafka Task from the Kafka Thread. The executor is
     * relatively short-lived, and it is okay to provide only one Thread, as no parallel execution occurs on this
     * executor. Per the interface contract above, the executor is created (and shut down) for each single Kafka
     * action which must be decoupled.
     *
     * @return A new executor, never <code>null</code>.
     */
    ExecutorService newExecutor();

}
| 1,128 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaClusters.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/KafkaClusters.java | package com.hermesworld.ais.galapagos.kafka;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
public interface KafkaClusters {

    /**
     * Returns the static configuration metadata of all configured Kafka environments.
     *
     * @return Configuration metadata of all environments, never <code>null</code>.
     */
    List<KafkaEnvironmentConfig> getEnvironmentsMetadata();

    /**
     * Returns the configuration metadata of the given environment, if configured.
     *
     * @param environmentId ID of the environment.
     * @return The environment's configuration metadata, or an empty Optional if the ID is unknown.
     */
    Optional<KafkaEnvironmentConfig> getEnvironmentMetadata(String environmentId);

    /** Returns the IDs of all configured Kafka environments. */
    List<String> getEnvironmentIds();

    /** Returns the ID of the environment which is considered the production environment. */
    String getProductionEnvironmentId();

    /**
     * Returns the live cluster object for the given environment, if configured.
     *
     * @param environmentId ID of the environment.
     * @return The cluster, or an empty Optional if the ID is unknown.
     */
    Optional<KafkaCluster> getEnvironment(String environmentId);

    /**
     * Returns all known environments as a List. Each environment can be queried for its ID.
     *
     * @return All known environments as a list. Note: the list is a freshly collected snapshot (the previous claim
     *         that it is unmodifiable does not hold for {@code Collectors.toList()}); modifications to it have no
     *         effect on the configuration.
     */
    default List<KafkaCluster> getEnvironments() {
        return getEnvironmentIds().stream().map(id -> getEnvironment(id).orElse(null)).filter(Objects::nonNull)
                .collect(Collectors.toList());
    }

    /**
     * Returns the authentication module for the given environment, if configured.
     *
     * @param environmentId ID of the environment.
     * @return The authentication module, or an empty Optional if the ID is unknown.
     */
    Optional<KafkaAuthenticationModule> getAuthenticationModule(String environmentId);

    /**
     * Returns a repository for saving and retrieving objects in a Kafka Topic. Repositories returned by this method can
     * be used to store information for use across all clusters (best example is the repository for Application Owner
     * Requests). Usually, this repository will be stored in the production Kafka cluster, although clients should not
     * rely on this and leave the exact storage location up to the implementation.
     *
     * @param <T> Type of the objects to store in the repository.
     * @param topicName Name of the repository. It will be prefixed with a globally configured prefix for determining
     * the "real" Kafka Topic name.
     * @param valueClass Class of the objects to store in the repository. Instances of the class must be serializable as
     * JSON.
     * @return A repository for saving and retrieving objects with a "global", i.e. cross-cluster, scope.
     */
    <T extends HasKey> TopicBasedRepository<T> getGlobalRepository(String topicName, Class<T> valueClass);

    /**
     * Closes all connections to all Kafka clusters and releases all resources, e.g. Thread pools and executors.
     */
    void dispose();
}
| 2,467 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicConfigEntry.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/TopicConfigEntry.java | package com.hermesworld.ais.galapagos.kafka;
/**
* Represents a single configuration for a topic. Is more or less equivalent to Kafka's <code>ConfigEntry</code> class,
* but here for decoupling Galapagos API from Kafka AdminClient API.
*
* @author AlbrechtFlo
*
*/
public interface TopicConfigEntry {

    /** Returns the name of the configuration property, e.g. <code>retention.ms</code>. */
    String getName();

    /** Returns the current value of the configuration property. */
    String getValue();

    /** Returns <code>true</code> if the value is the default value, i.e. not explicitly set for the topic. */
    boolean isDefault();

    /** Returns <code>true</code> if the configuration property cannot be modified. */
    boolean isReadOnly();

    /** Returns <code>true</code> if the value is sensitive (e.g. a password) and may be masked. */
    boolean isSensitive();
}
| 439 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicCreateParams.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/TopicCreateParams.java | package com.hermesworld.ais.galapagos.kafka;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Parameters for creating a Kafka topic: partition count, replication factor, and optional custom topic configuration
 * properties.
 */
public final class TopicCreateParams {

    // Fields are now final: none of them is ever reassigned after construction.
    private final int numberOfPartitions;

    private final int replicationFactor;

    // Internal mutable copy; exposed only as an unmodifiable view via getTopicConfigs().
    private final Map<String, String> topicConfigs;

    public TopicCreateParams(int numberOfPartitions, int replicationFactor) {
        this(numberOfPartitions, replicationFactor, Collections.emptyMap());
    }

    /**
     * @param numberOfPartitions Number of partitions for the new topic.
     * @param replicationFactor  Replication factor for the new topic.
     * @param topicConfigs       Custom topic configuration properties; copied defensively.
     */
    public TopicCreateParams(int numberOfPartitions, int replicationFactor, Map<String, String> topicConfigs) {
        this.numberOfPartitions = numberOfPartitions;
        this.replicationFactor = replicationFactor;
        this.topicConfigs = new HashMap<>(topicConfigs);
    }

    /** Sets (or overwrites) a single custom topic configuration property. */
    public void setTopicConfig(String configKey, String configValue) {
        topicConfigs.put(configKey, configValue);
    }

    public int getNumberOfPartitions() {
        return numberOfPartitions;
    }

    public int getReplicationFactor() {
        return replicationFactor;
    }

    /**
     * Returns custom Kafka configuration properties for this topic.
     *
     * @return Custom Kafka configuration properties for this topic, maybe an empty map, but never <code>null</code>.
     */
    public Map<String, String> getTopicConfigs() {
        return Collections.unmodifiableMap(topicConfigs);
    }
}
| 1,349 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaCluster.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/KafkaCluster.java | package com.hermesworld.ais.galapagos.kafka;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.acl.AclBinding;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
/**
* Representation of a live Kafka Cluster, i.e. a set of Kafka Brokers behaving as a single Kafka system. <br>
* This interface maps all "low-level" KafkaAdmin operations to higher-level calls, and provides arbitrary persistence
* based on internal topics. It does not apply rules or business logic to the operations, and does not check access
* rights. <br>
* A Kafka Cluster also fires events which can be caught by listeners e.g. for doing auditing logs or other
* cross-cutting operations. See <code>events</code> package for possible listener interfaces to implement.
*
* @author AlbrechtFlo
*
*/
public interface KafkaCluster {

    /** Returns the ID of the Kafka environment represented by this cluster. */
    String getId();

    /** Applies the ACLs required by the given user (see {@link KafkaUser#getRequiredAclBindings()}) to the cluster. */
    CompletableFuture<Void> updateUserAcls(KafkaUser user);

    /** Removes the ACLs of the given user from the cluster. */
    CompletableFuture<Void> removeUserAcls(KafkaUser user);

    /**
     * Iterates over the ACL bindings of the cluster, invoking the callback for each binding. NOTE(review): the
     * semantics of the callback's Boolean return value (presumably whether to continue iteration) are not visible
     * here — confirm with the implementation.
     */
    CompletableFuture<Void> visitAcls(Function<AclBinding, Boolean> callback);

    /** Returns the topic-based metadata repository with the given name for this cluster. */
    <T extends HasKey> TopicBasedRepository<T> getRepository(String topicName, Class<T> valueClass);

    /** Returns all metadata repositories known for this cluster. */
    Collection<TopicBasedRepository<?>> getRepositories();

    /** Creates a topic with the given name and creation parameters. */
    CompletableFuture<Void> createTopic(String topicName, TopicCreateParams topicCreateParams);

    /** Deletes the topic with the given name. */
    CompletableFuture<Void> deleteTopic(String topicName);

    /** Returns the configuration entries of the given topic. */
    CompletableFuture<Set<TopicConfigEntry>> getTopicConfig(String topicName);

    /** Returns the default configuration for new topics. */
    CompletableFuture<Map<String, String>> getDefaultTopicConfig();

    /** Sets the given configuration values on the given topic. */
    CompletableFuture<Void> setTopicConfig(String topicName, Map<String, String> configValues);

    /** Returns the number of currently active brokers of the cluster. */
    CompletableFuture<Integer> getActiveBrokerCount();

    /** Builds creation parameters reflecting the given, existing topic. */
    CompletableFuture<TopicCreateParams> buildTopicCreateParams(String topicName);

    /** Reads up to <code>limit</code> records from the given topic, e.g. for data preview. */
    CompletableFuture<List<ConsumerRecord<String, String>>> peekTopicData(String topicName, int limit);

    /** Returns the version of the Kafka brokers of this cluster. */
    CompletableFuture<String> getKafkaServerVersion();
}
| 2,190 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaClusterAdminClient.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/KafkaClusterAdminClient.java | package com.hermesworld.ais.galapagos.kafka;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import java.util.Collection;
import java.util.Map;
/**
* Galapagos Interface for abstracting the not-so-helpful Kafka Admin interface. This allows for wrapping and e.g.
* KafkaFuture encapsulation. <br>
* Also, it is reduced to the relevant Kafka Admin operations for Galapagos.
*/
public interface KafkaClusterAdminClient {

    /** Deletes all ACL bindings matching any of the given filters. */
    KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters);

    /** Creates the given ACL bindings. */
    KafkaFuture<Void> createAcls(Collection<AclBinding> bindings);

    /** Returns all ACL bindings matching the given filter. */
    KafkaFuture<Collection<AclBinding>> describeAcls(AclBindingFilter filter);

    /** Creates the given topic. */
    KafkaFuture<Void> createTopic(NewTopic topic);

    /** Deletes the topic with the given name. */
    KafkaFuture<Void> deleteTopic(String topicName);

    /** Returns the configuration of the given resource (e.g. a topic or broker). */
    KafkaFuture<Config> describeConfigs(ConfigResource resource);

    /** Returns the nodes (brokers) of the cluster. */
    KafkaFuture<Collection<Node>> describeCluster();

    /** Incrementally alters the configuration of the given resource with the given values. */
    KafkaFuture<Void> incrementalAlterConfigs(ConfigResource resource, Map<String, String> configValues);

    /** Returns the description (partitions, replicas) of the given topic. */
    KafkaFuture<TopicDescription> describeTopic(String topicName);
}
| 1,406 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaSender.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/KafkaSender.java | package com.hermesworld.ais.galapagos.kafka;
import java.util.concurrent.CompletableFuture;
/**
* Galapagos-specific abstraction of the KafkaTemplate class of Spring (or a different sender implementation). This
* abstraction is used to be able to build a Thread-safe variant around the KafkaTemplate class (which causes some
* strange blockings when concatenating futures and sending from their completion stages). <br>
* Also, it allows for easier unit testing.
*
* @author AlbrechtFlo
*
*/
public interface KafkaSender {

    /**
     * Sends the given message with the given key to the given topic.
     *
     * @return A future which completes when the send has finished, or completes exceptionally if the send failed.
     */
    CompletableFuture<Void> send(String topic, String key, String message);

}
| 613 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaUser.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/KafkaUser.java | package com.hermesworld.ais.galapagos.kafka;
import java.util.Collection;
import org.apache.kafka.common.acl.AclBinding;
public interface KafkaUser {

    /** Returns the Kafka user (principal) name of this user, as used in ACL bindings. */
    String getKafkaUserName();

    /** Returns all ACL bindings this user requires on the Kafka cluster. */
    Collection<AclBinding> getRequiredAclBindings();

}
| 242 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaEnvironmentConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/config/KafkaEnvironmentConfig.java | package com.hermesworld.ais.galapagos.kafka.config;
public interface KafkaEnvironmentConfig {

    /** Returns the unique ID of this environment. */
    String getId();

    /** Returns the display name of this environment. */
    String getName();

    /** Returns the Kafka bootstrap servers for connecting to this environment. */
    String getBootstrapServers();

    /**
     * If <code>true</code>, direct changes on this environment are not allowed (topics / subscriptions). You must use
     * staging functionality from the preceding environment stage to get changes on this environment.
     *
     * @return <code>true</code> if no direct changes are allowed on this environment, <code>false</code> otherwise.
     */
    boolean isStagingOnly();

    /** Returns the authentication mode for this environment, e.g. <code>certificates</code> or <code>ccloud</code>. */
    String getAuthenticationMode();

    /**
     * If <code>true</code>, application ACLs will <b>not</b> be updated on this environment. You will have to provide
     * external means to have correct ACLs in place. This can be useful e.g. in migration scenarios, where ACLs are
     * synchronized outside Galapagos, and you do not want to override these based on e.g. yet incomplete metadata.
     *
     * @return <code>true</code> if application ACLs shall not be updated on this environment, <code>false</code>
     * otherwise.
     */
    boolean isNoUpdateApplicationAcls();

    /**
     * If <code>true</code>, developer authentications on this environment will receive not only all read access of
     * their assigned applications, but also their write access. This can be useful for manual insertion of test data
     * into topics of the assigned applications.
     *
     * @return <code>true</code> if developer authentications shall receive write access on this environment (depending
     * on associated applications), <code>false</code> otherwise.
     */
    boolean isDeveloperWriteAccess();
}
| 1,655 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DefaultAclConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/config/DefaultAclConfig.java | package com.hermesworld.ais.galapagos.kafka.config;
import lombok.Getter;
import lombok.Setter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourceType;
/**
 * Configuration holder for a single default ACL, presumably bound from the <code>galapagos.kafka.default-acls</code>
 * configuration (see <code>KafkaEnvironmentsConfig</code>) — TODO confirm binding location.
 */
@Getter
@Setter
public class DefaultAclConfig {

    // Name of the Kafka resource; interpretation depends on patternType (literal name or prefix).
    private String name;

    // Type of Kafka resource the ACL applies to (e.g. TOPIC, GROUP).
    private ResourceType resourceType;

    // LITERAL or PREFIXED matching of the resource name.
    private PatternType patternType;

    // The Kafka operation covered by this ACL (e.g. READ, WRITE).
    private AclOperation operation;
}
| 446 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaEnvironmentsConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/config/KafkaEnvironmentsConfig.java | package com.hermesworld.ais.galapagos.kafka.config;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthenticationModule;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.impl.KafkaEnvironmentConfigImpl;
import com.hermesworld.ais.galapagos.kafka.impl.ConnectedKafkaClusters;
import lombok.Getter;
import lombok.Setter;
import org.bouncycastle.operator.OperatorException;
import org.bouncycastle.pkcs.PKCSException;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Spring configuration for the Kafka environments of Galapagos. Binds all <code>galapagos.kafka.*</code> properties
 * and provides the central {@link KafkaClusters} bean.
 */
@Configuration
@ConfigurationProperties(prefix = "galapagos.kafka")
public class KafkaEnvironmentsConfig {

    @Setter
    private List<KafkaEnvironmentConfigImpl> environments = new ArrayList<>();

    @Getter
    @Setter
    private String productionEnvironment;

    @Getter
    @Setter
    private boolean logAdminOperations;

    @Getter
    @Setter
    private Long adminClientRequestTimeout;

    @Getter
    @Setter
    private String metadataTopicsPrefix;

    @Getter
    @Setter
    private List<DefaultAclConfig> defaultAcls;

    /** Returns a copy of the configured Kafka environments. */
    public List<KafkaEnvironmentConfig> getEnvironments() {
        return new ArrayList<>(environments);
    }

    /**
     * Creates the central {@link KafkaClusters} bean. Validates the configuration, builds and initializes one
     * authentication module per environment, and connects the clusters.
     *
     * @param executorFactory   Factory for executors used to decouple Kafka callbacks.
     * @param replicationFactor Standard replication factor for metadata topics.
     * @return The connected clusters facade; disposed automatically on context shutdown.
     */
    @Bean(destroyMethod = "dispose")
    public KafkaClusters kafkaClusters(KafkaExecutorFactory executorFactory,
            @Value("${galapagos.topics.standardReplicationFactor}") int replicationFactor)
            throws IOException, PKCSException, OperatorException, GeneralSecurityException {
        validateConfig();
        Map<String, KafkaAuthenticationModule> authModules = environments.stream()
                .collect(Collectors.toMap(KafkaEnvironmentConfigImpl::getId, this::buildAuthenticationModule));
        for (KafkaAuthenticationModule module : authModules.values()) {
            module.init().join();
        }
        return new ConnectedKafkaClusters(new ArrayList<>(environments), authModules, productionEnvironment,
                metadataTopicsPrefix, executorFactory, replicationFactor, logAdminOperations,
                adminClientRequestTimeout);
    }

    // Fails fast with a meaningful message if the environment configuration is incomplete.
    // IllegalStateException instead of raw RuntimeException: clearer intent, still unchecked.
    private void validateConfig() {
        if (environments.isEmpty()) {
            throw new IllegalStateException(
                    "No Kafka environments configured. Please configure at least one Kafka environment using galapagos.kafka.environments[0].<properties>");
        }
        if (productionEnvironment == null) {
            throw new IllegalStateException(
                    "No Kafka production environment configured. Please set property galapagos.kafka.production-environment.");
        }
        if (environments.stream().noneMatch(env -> productionEnvironment.equals(env.getId()))) {
            throw new IllegalStateException(
                    "No environment configuration given for production environment " + productionEnvironment);
        }
    }

    // Selects the authentication module implementation based on the configured authentication mode.
    private KafkaAuthenticationModule buildAuthenticationModule(KafkaEnvironmentConfigImpl envConfig) {
        if ("ccloud".equals(envConfig.getAuthenticationMode())) {
            return new ConfluentCloudAuthenticationModule(envConfig.getCcloud());
        }
        else if ("certificates".equals(envConfig.getAuthenticationMode())) {
            return new CertificatesAuthenticationModule(envConfig.getId(), envConfig.getCertificates());
        }
        throw new IllegalArgumentException("Invalid authentication mode for environment " + envConfig.getId() + ": "
                + envConfig.getAuthenticationMode());
    }
}
| 4,095 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaEnvironmentConfigImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/config/impl/KafkaEnvironmentConfigImpl.java | package com.hermesworld.ais.galapagos.kafka.config.impl;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthConfig;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationConfig;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import lombok.Getter;
import lombok.Setter;
/**
 * Bean-style implementation of {@link KafkaEnvironmentConfig}, populated from application configuration
 * (<code>galapagos.kafka.environments[*]</code> — see <code>KafkaEnvironmentsConfig</code>).
 */
@Setter
public class KafkaEnvironmentConfigImpl implements KafkaEnvironmentConfig {

    @Getter(onMethod = @__({ @Override }))
    private String id;

    @Getter(onMethod = @__({ @Override }))
    private String name;

    @Getter(onMethod = @__({ @Override }))
    private String bootstrapServers;

    @Getter(onMethod = @__({ @Override }))
    private boolean stagingOnly;

    @Getter(onMethod = @__({ @Override }))
    private boolean developerWriteAccess;

    @Getter(onMethod = @__({ @Override }))
    private boolean noUpdateApplicationAcls;

    @Getter(onMethod = @__({ @Override }))
    private String authenticationMode;

    // Used when authenticationMode is "ccloud" (see KafkaEnvironmentsConfig.buildAuthenticationModule).
    @Getter
    private ConfluentCloudAuthConfig ccloud;

    // Used when authenticationMode is "certificates" (see KafkaEnvironmentsConfig.buildAuthenticationModule).
    @Getter
    private CertificatesAuthenticationConfig certificates;
}
| 1,110 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
EnvironmentsController.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/controller/EnvironmentsController.java | package com.hermesworld.ais.galapagos.kafka.controller;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ResponseStatusException;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
@RestController
public class EnvironmentsController {

    private final KafkaClusters kafkaEnvironments;

    public EnvironmentsController(KafkaClusters kafkaEnvironments) {
        this.kafkaEnvironments = kafkaEnvironments;
    }

    /** Returns metadata about all configured Kafka environments. */
    @GetMapping(value = "/api/environments", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<KafkaEnvironmentDto> getEnvironments() {
        return kafkaEnvironments.getEnvironmentsMetadata().stream().map(this::toDto).collect(Collectors.toList());
    }

    @GetMapping(value = "/api/environments/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public List<KafkaEnvironmentLivenessDto> getEnvironmentLiveness(@PathVariable String environmentId) {
        // TODO currently unsupported
        return Collections.emptyList();
    }

    /**
     * Returns the Kafka broker version of the given environment.
     *
     * @param environmentId ID of the Kafka environment.
     * @return The Kafka server version, as reported by the cluster.
     * @throws Exception If the version could not be determined.
     */
    @GetMapping(value = "/api/environments/{environmentId}/kafkaversion", produces = MediaType.APPLICATION_JSON_VALUE)
    public String getKafkaVersions(@PathVariable String environmentId) throws Exception {
        // FIX: was Optional.get(), which returned HTTP 500 (NoSuchElementException) for unknown environment IDs;
        // now responds with a proper 404.
        return kafkaEnvironments.getEnvironment(environmentId)
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND,
                        "Unknown environment: " + environmentId))
                .getKafkaServerVersion().get();
    }

    // Maps environment configuration to the REST DTO, marking the production environment.
    private KafkaEnvironmentDto toDto(KafkaEnvironmentConfig env) {
        boolean production = env.getId().equals(kafkaEnvironments.getProductionEnvironmentId());
        return new KafkaEnvironmentDto(env.getId(), env.getName(), env.getBootstrapServers(), production,
                env.isStagingOnly(), env.getAuthenticationMode());
    }
}
| 1,972 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaEnvironmentLivenessDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/controller/KafkaEnvironmentLivenessDto.java | package com.hermesworld.ais.galapagos.kafka.controller;
import lombok.Getter;
/**
 * DTO describing the liveness of a single Kafka broker, as used by the environments REST endpoint.
 */
@Getter
public class KafkaEnvironmentLivenessDto {

    // Fields are now final: this DTO is fully initialized in the constructor and never mutated.
    private final String server;

    // Presumably indicates whether the server responded to a liveness check — TODO confirm with producer of this DTO.
    private final boolean online;

    public KafkaEnvironmentLivenessDto(String server, boolean online) {
        this.server = server;
        this.online = online;
    }
}
| 330 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaEnvironmentDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/controller/KafkaEnvironmentDto.java | package com.hermesworld.ais.galapagos.kafka.controller;
import lombok.Getter;
/**
 * DTO with static metadata about a configured Kafka environment, as returned by the <code>/api/environments</code>
 * endpoint (see <code>EnvironmentsController</code>).
 */
@Getter
public class KafkaEnvironmentDto {

    private final String id;

    private final String name;

    private final String bootstrapServers;

    // true if this environment is the configured production environment.
    private final boolean production;

    // true if no direct changes are allowed on this environment (staging required).
    private final boolean stagingOnly;

    private final String authenticationMode;

    public KafkaEnvironmentDto(String id, String name, String bootstrapServers, boolean production, boolean stagingOnly,
            String authenticationMode) {
        this.id = id;
        this.name = name;
        this.bootstrapServers = bootstrapServers;
        this.production = production;
        this.stagingOnly = stagingOnly;
        this.authenticationMode = authenticationMode;
    }
}
| 756 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConnectedKafkaCluster.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/ConnectedKafkaCluster.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.*;
import com.hermesworld.ais.galapagos.kafka.util.KafkaTopicConfigHelper;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.acl.*;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.ConfigResource.Type;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import org.springframework.util.ObjectUtils;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;
@Slf4j
public class ConnectedKafkaCluster implements KafkaCluster {

    // ID of the Galapagos environment represented by this cluster connection.
    private final String environmentId;

    // Not final on purpose: can be replaced via wrapAdminClient(), e.g. for "dry-run" interception.
    private KafkaClusterAdminClient adminClient;

    // Creates the topic-based metadata repositories handed out by getRepository().
    private final KafkaRepositoryContainer repositoryContainer;

    // Lazily filled cache of repositories, keyed by (logical) topic name.
    private final Map<String, TopicBasedRepository<?>> repositories = new ConcurrentHashMap<>();

    // Factory for the short-lived consumers used by peekTopicData().
    private final KafkaConsumerFactory<String, String> kafkaConsumerFactory;

    // Converts KafkaFutures into CompletableFutures which complete on non-Kafka threads.
    private final KafkaFutureDecoupler futureDecoupler;

    // Upper bound for the total time peekTopicData() keeps polling before returning what it has.
    private static final long MAX_POLL_TIME = Duration.ofSeconds(10).toMillis();

    public ConnectedKafkaCluster(String environmentId, KafkaRepositoryContainer repositoryContainer,
            KafkaClusterAdminClient adminClient, KafkaConsumerFactory<String, String> kafkaConsumerFactory,
            KafkaFutureDecoupler futureDecoupler) {
        this.environmentId = environmentId;
        this.adminClient = adminClient;
        this.repositoryContainer = repositoryContainer;
        this.kafkaConsumerFactory = kafkaConsumerFactory;
        this.futureDecoupler = futureDecoupler;
    }

    /**
     * Convenience function to enable wrapping of the contained AdminClient, e.g. to intercept update calls within a
     * "dry-run" operation.
     *
     * @param wrapperFn Function returning a new AdminClient object which should wrap the existing AdminClient (passed
     *                  to the function). It is also valid to return the AdminClient object passed to this function.
     */
    public void wrapAdminClient(Function<KafkaClusterAdminClient, KafkaClusterAdminClient> wrapperFn) {
        this.adminClient = wrapperFn.apply(this.adminClient);
    }

    @Override
    public String getId() {
        return environmentId;
    }

    /**
     * Aligns the ACLs stored in Kafka for the given user with the user's required ACL bindings: existing bindings no
     * longer required are deleted, and required bindings not yet present are created.
     */
    @Override
    public CompletableFuture<Void> updateUserAcls(KafkaUser user) {
        List<AclBinding> createAcls = new ArrayList<>();

        return getUserAcls(user.getKafkaUserName()).thenCompose(acls -> {
            List<AclBinding> targetAcls = new ArrayList<>(user.getRequiredAclBindings());
            List<AclBinding> deleteAcls = new ArrayList<>(acls);

            // createAcls := required \ existing; deleteAcls := existing \ required
            createAcls.addAll(targetAcls);
            createAcls.removeAll(acls);
            deleteAcls.removeAll(targetAcls);

            return deleteAcls.isEmpty() ? CompletableFuture.completedFuture(null)
                    : toCompletableFuture(adminClient
                            .deleteAcls(deleteAcls.stream().map(acl -> acl.toFilter()).collect(Collectors.toList())));
        }).thenCompose(o -> createAcls.isEmpty() ? CompletableFuture.completedFuture(null)
                : toCompletableFuture(adminClient.createAcls(createAcls)));
    }

    /**
     * Deletes all ACLs of the given user, for any resource type. A no-op if the user has no Kafka user name.
     */
    @Override
    public CompletableFuture<Void> removeUserAcls(KafkaUser user) {
        String userName = user.getKafkaUserName();
        if (userName == null) {
            return FutureUtil.noop();
        }
        return toCompletableFuture(adminClient.deleteAcls(List.of(userAclFilter(userName, ResourceType.ANY))))
                .thenApply(o -> null);
    }

    /**
     * Passes every ACL binding of the cluster to the given callback, stopping early once the callback returns
     * {@code false}.
     */
    @Override
    public CompletableFuture<Void> visitAcls(Function<AclBinding, Boolean> callback) {
        return toCompletableFuture(adminClient.describeAcls(AclBindingFilter.ANY)).thenAccept(acls -> {
            for (AclBinding acl : acls) {
                if (!callback.apply(acl)) {
                    break;
                }
            }
        });
    }

    @SuppressWarnings("unchecked")
    @Override
    public <T extends HasKey> TopicBasedRepository<T> getRepository(String topicName, Class<T> valueClass) {
        // The unchecked cast is safe as long as callers always pass the same valueClass for a given topicName.
        return (TopicBasedRepository<T>) repositories.computeIfAbsent(topicName,
                s -> repositoryContainer.addRepository(topicName, valueClass));
    }

    @Override
    public Collection<TopicBasedRepository<?>> getRepositories() {
        // defensive copy; only contains repositories already requested via getRepository()
        return new HashSet<>(repositories.values());
    }

    @Override
    public CompletableFuture<Void> createTopic(String topicName, TopicCreateParams topicCreateParams) {
        NewTopic newTopic = new NewTopic(topicName, topicCreateParams.getNumberOfPartitions(),
                (short) topicCreateParams.getReplicationFactor()).configs(topicCreateParams.getTopicConfigs());
        return toCompletableFuture(this.adminClient.createTopic(newTopic));
    }

    /**
     * Deletes the given topic, and afterwards all ACLs referring literally to this topic name.
     */
    @Override
    public CompletableFuture<Void> deleteTopic(String topicName) {
        AclBindingFilter aclFilter = new AclBindingFilter(
                new ResourcePatternFilter(ResourceType.TOPIC, topicName, PatternType.LITERAL),
                new AccessControlEntryFilter(null, null, AclOperation.ANY, AclPermissionType.ANY));

        KafkaFuture<Void> deleteTopicFuture = this.adminClient.deleteTopic(topicName);
        return toCompletableFuture(deleteTopicFuture)
                .thenCompose(o -> toCompletableFuture(adminClient.deleteAcls(Set.of(aclFilter)))).thenApply(o -> null);
    }

    @Override
    public CompletableFuture<Set<TopicConfigEntry>> getTopicConfig(String topicName) {
        ConfigResource cres = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
        return toCompletableFuture(adminClient.describeConfigs(cres)).thenApply(config -> config.entries().stream()
                .map(entry -> new TopicConfigEntryImpl(entry)).collect(Collectors.toSet()));
    }

    /**
     * Determines the cluster's default topic config values by querying the broker config of the first reported broker
     * node. Fails with a KafkaException if the cluster reports no nodes.
     */
    @Override
    public CompletableFuture<Map<String, String>> getDefaultTopicConfig() {
        return toCompletableFuture(adminClient.describeCluster()).thenCompose(nodes -> {
            if (nodes.isEmpty()) {
                return CompletableFuture.failedFuture(new KafkaException("No nodes in cluster"));
            }
            return toCompletableFuture(adminClient.describeConfigs(
                    new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(nodes.iterator().next().id()))));
        }).thenApply(config -> KafkaTopicConfigHelper.getTopicDefaultValues(config));
    }

    @Override
    public CompletableFuture<Void> setTopicConfig(String topicName, Map<String, String> configValues) {
        return toCompletableFuture(adminClient
                .incrementalAlterConfigs(new ConfigResource(ConfigResource.Type.TOPIC, topicName), configValues));
    }

    @Override
    public CompletableFuture<Integer> getActiveBrokerCount() {
        return toCompletableFuture(adminClient.describeCluster()).thenApply(nodes -> nodes.size());
    }

    /**
     * Builds TopicCreateParams which would recreate the given existing topic: same partition count, same replication
     * factor, and all non-default, non-sensitive config entries.
     */
    @Override
    public CompletableFuture<TopicCreateParams> buildTopicCreateParams(String topicName) {
        return toCompletableFuture(adminClient.describeTopic(topicName))
                .thenCompose(desc -> buildCreateTopicParams(desc));
    }

    /**
     * Reads (up to) the last {@code limit} records of the given topic, using a dedicated short-lived consumer on a
     * background thread. Each assigned partition is seeked to (endOffset - limit), and polling stops once
     * {@code limit} records were read, {@link #MAX_POLL_TIME} has elapsed, or the thread is interrupted.
     */
    @Override
    public CompletableFuture<List<ConsumerRecord<String, String>>> peekTopicData(String topicName, int limit) {
        CompletableFuture<List<ConsumerRecord<String, String>>> result = new CompletableFuture<>();
        long startTime = System.currentTimeMillis();

        Runnable r = () -> {
            KafkaConsumer<String, String> consumer = kafkaConsumerFactory.newConsumer();
            consumer.subscribe(Set.of(topicName), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                }

                // TODO do we really want to do this? This way, Galapagos could be mis-used as a continuous read tool
                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    // seek each partition close to its end, so only the newest records are read
                    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
                    for (Map.Entry<TopicPartition, Long> offset : endOffsets.entrySet()) {
                        consumer.seek(offset.getKey(), Math.max(0, offset.getValue() - limit));
                    }
                }
            });

            List<ConsumerRecord<String, String>> records = new ArrayList<>();
            while (!Thread.interrupted() && records.size() < limit
                    && System.currentTimeMillis() - startTime < MAX_POLL_TIME) {
                try {
                    ConsumerRecords<String, String> polled = consumer.poll(Duration.ofSeconds(1));
                    polled.forEach(rec -> {
                        if (records.size() < limit) {
                            records.add(rec);
                        }
                    });
                }
                catch (InterruptException | WakeupException e) {
                    break;
                }
                catch (KafkaException e) {
                    result.completeExceptionally(e);
                    // best-effort close; a failure here must not mask the original exception
                    try {
                        consumer.close();
                    }
                    catch (Throwable t) {
                    }
                    return;
                }
            }
            // best-effort close; the result is completed with whatever was read so far
            try {
                consumer.close();
            }
            catch (Throwable t) {
            }
            result.complete(records);
        };
        new Thread(r).start();

        return result;
    }

    /**
     * Determines the Kafka server version by reading the broker config property
     * {@code inter.broker.protocol.version} of the first reported broker node, stripping any suffix after a dash.
     * Yields {@code "UNKNOWN_VERSION"} if the property is not present.
     * NOTE(review): unlike getDefaultTopicConfig(), this does not guard against an empty node list;
     * iterator().next() would throw NoSuchElementException in that case - confirm whether this can occur.
     */
    @Override
    public CompletableFuture<String> getKafkaServerVersion() {
        Function<String, String> toVersionString = s -> !s.contains("-") ? s : s.substring(0, s.indexOf('-'));

        return toCompletableFuture(adminClient.describeCluster()).thenCompose(coll -> {
            String nodeName = coll.iterator().next().idString();
            return toCompletableFuture(adminClient.describeConfigs(new ConfigResource(Type.BROKER, nodeName)))
                    .thenApply(config -> config.get("inter.broker.protocol.version") == null ? "UNKNOWN_VERSION"
                            : config.get("inter.broker.protocol.version").value())
                    .thenApply(toVersionString);
        });
    }

    // Helper for buildTopicCreateParams(): combines the topic description with its non-default config entries.
    private CompletableFuture<TopicCreateParams> buildCreateTopicParams(TopicDescription description) {
        return getTopicConfig(description.name()).thenApply(configs -> {
            TopicCreateParams params = new TopicCreateParams(description.partitions().size(),
                    description.partitions().get(0).replicas().size());
            for (TopicConfigEntry config : configs) {
                if (!config.isDefault() && !config.isSensitive()) {
                    params.setTopicConfig(config.getName(), config.getValue());
                }
            }
            return params;
        });
    }

    // Returns all ACL bindings of the given user, or an empty list for a null / empty user name.
    private CompletableFuture<Collection<AclBinding>> getUserAcls(String username) {
        if (ObjectUtils.isEmpty(username)) {
            return CompletableFuture.completedFuture(List.of());
        }
        return toCompletableFuture(adminClient.describeAcls(userAclFilter(username, ResourceType.ANY)));
    }

    // Builds a filter matching all ACLs of the given user for the given resource type.
    private AclBindingFilter userAclFilter(String username, ResourceType resourceType) {
        ResourcePatternFilter patternFilter = new ResourcePatternFilter(resourceType, null, PatternType.ANY);
        AccessControlEntryFilter entryFilter = new AccessControlEntryFilter(username, null, AclOperation.ANY,
                AclPermissionType.ANY);
        return new AclBindingFilter(patternFilter, entryFilter);
    }

    // Decouples the KafkaFuture so that downstream stages do not run on Kafka's internal threads.
    private <T> CompletableFuture<T> toCompletableFuture(KafkaFuture<T> kafkaFuture) {
        return futureDecoupler.toCompletableFuture(kafkaFuture);
    }
}
| 12,774 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaRepositoryContainerImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaRepositoryContainerImpl.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaSender;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.TopicConfig;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.errors.WakeupException;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@Slf4j
public class KafkaRepositoryContainerImpl implements KafkaRepositoryContainer {

    // Single shared consumer feeding all topic-based repositories of this environment.
    private final KafkaConsumer<String, String> consumer;

    private final KafkaSender sender;

    // Repositories keyed by their full (prefixed) Kafka topic name.
    private final Map<String, TopicBasedRepositoryImpl<?>> repositories = new ConcurrentHashMap<>();

    // Set when the subscription set changed and must be re-applied by the consumer thread.
    private final AtomicBoolean refreshSubscriptions = new AtomicBoolean();

    private final AdminClient adminClient;

    private final String environmentId;

    private static final Duration POLL_DURATION = Duration.of(10, ChronoUnit.SECONDS);

    // Back-off after a recoverable KafkaException in the consumer loop.
    private static final long CONSUMER_ERROR_WAIT_MILLIS = TimeUnit.SECONDS.toMillis(30);

    // Background thread running consume(); set to null in dispose().
    private Thread consumerThread;

    // Monitor used to wake up the consumer thread once the first repository has been added.
    private final Object consumeSemaphore = new Object();

    // Prefix prepended to logical topic names to build the internal Galapagos topic names.
    private final String prefix;

    // Desired replication factor for newly created metadata topics (capped by the broker count).
    private final int replicationFactor;

    /**
     * Creates the container and immediately starts its background consumer thread. The thread idles until the first
     * repository is added via {@link #addRepository(String, Class)}.
     */
    public KafkaRepositoryContainerImpl(KafkaConnectionManager connectionManager, String environmentId,
            String galapagosInternalPrefix, int replicationFactor) {
        this.consumer = connectionManager.getConsumerFactory(environmentId).newConsumer();
        this.sender = connectionManager.getKafkaSender(environmentId);
        this.adminClient = connectionManager.getAdminClient(environmentId);
        this.environmentId = environmentId;
        this.prefix = galapagosInternalPrefix;
        this.replicationFactor = replicationFactor;
        this.consumerThread = new Thread(this::consume);
        this.consumerThread.start();
    }

    /**
     * Stops the background consumer thread. If no repository was ever added, the thread is still waiting on the
     * semaphore and is interrupted; otherwise it sits in poll() and is stopped via consumer.wakeup().
     */
    public void dispose() {
        if (this.consumerThread != null) {
            if (this.repositories.isEmpty()) {
                this.consumerThread.interrupt();
            }
            else {
                // consumer thread will terminate by the wakeup
                this.consumer.wakeup();
            }
            this.consumerThread = null;
        }
    }

    /**
     * Creates (if necessary) the backing compacted Kafka topic, registers a new repository for it, and wakes up the
     * consumer thread so it refreshes its topic subscriptions.
     */
    @Override
    public <T extends HasKey> TopicBasedRepository<T> addRepository(String topicName, Class<T> valueClass) {
        String kafkaTopicName = prefix + topicName;
        ensureTopicExists(kafkaTopicName);
        TopicBasedRepositoryImpl<T> repository = new TopicBasedRepositoryImpl<>(kafkaTopicName, topicName, valueClass,
                sender);
        this.repositories.put(kafkaTopicName, repository);
        refreshSubscriptions.set(true);
        synchronized (consumeSemaphore) {
            consumeSemaphore.notify();
        }
        return repository;
    }

    // Re-subscribes to all repository topics; on assignment, seeks to the beginning so the full
    // (compacted) topic content is replayed into the repositories.
    private void updateSubscriptions() {
        consumer.subscribe(repositories.keySet(), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                log.debug("Environment " + environmentId + ": Consumer has been assigned to partitions " + partitions);
                consumer.seekToBeginning(partitions);
            }
        });
    }

    /**
     * Creates the given metadata topic if it does not exist yet: one partition, compacted, replication factor capped
     * by the current broker count.
     * NOTE(review): any failure of describeTopics() (not only "topic missing") leads to a create attempt - confirm
     * this is intended, e.g. for authorization errors.
     */
    private void ensureTopicExists(String topic) {
        try {
            Map<String, TopicDescription> desc;
            try {
                desc = this.adminClient.describeTopics(Set.of(topic)).allTopicNames().get();
            }
            catch (Exception e) {
                desc = Collections.emptyMap();
            }
            if (desc.isEmpty()) {
                log.info("Creating metadata topic " + topic + " on environment " + environmentId);
                int nodeCount = this.adminClient.describeCluster().nodes().get().size();
                int replicationFactor = Math.min(this.replicationFactor, nodeCount);

                NewTopic newTopic = new NewTopic(topic, 1, (short) replicationFactor);
                newTopic = newTopic
                        .configs(Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT));
                this.adminClient.createTopics(Set.of(newTopic)).all().get();
            }
        }
        catch (InterruptedException e) {
            // restore the interrupt flag for the caller
            Thread.currentThread().interrupt();
        }
        catch (ExecutionException e) {
            // unwrap the real cause for callers
            if (e.getCause() instanceof RuntimeException) {
                throw (RuntimeException) e.getCause();
            }
            throw new RuntimeException(e.getCause());
        }
    }

    /**
     * Main loop of the background consumer thread: first waits until at least one repository exists, then polls the
     * subscribed metadata topics and dispatches each record to its repository. Exits on interrupt, wakeup, or an
     * unrecoverable auth error; other KafkaExceptions are retried after a 30 second back-off.
     */
    private void consume() {
        // phase 1: idle until the first repository is registered (or the thread is interrupted)
        while (repositories.isEmpty() && !Thread.interrupted()) {
            try {
                synchronized (consumeSemaphore) {
                    consumeSemaphore.wait();
                }
            }
            catch (InterruptedException e) {
                return;
            }
        }

        // phase 2: poll loop
        while (!Thread.interrupted()) {
            if (refreshSubscriptions.getAndSet(false)) {
                updateSubscriptions();
            }
            try {
                long start = 0;
                if (log.isTraceEnabled()) {
                    log.trace("Calling poll() on environment " + environmentId);
                    start = System.currentTimeMillis();
                }
                ConsumerRecords<String, String> records = consumer.poll(POLL_DURATION);
                if (log.isTraceEnabled()) {
                    log.trace("poll() returned " + records.count() + " record(s) and took "
                            + (System.currentTimeMillis() - start) + " ms");
                }

                for (ConsumerRecord<String, String> record : records) {
                    TopicBasedRepositoryImpl<?> repository = repositories.get(record.topic());
                    if (repository != null) {
                        repository.messageReceived(record.topic(), record.key(), record.value());
                    }
                    else {
                        log.warn("No handler found for message on topic " + record.topic());
                    }
                }
            }
            catch (WakeupException e) {
                // signal to close consumer!
                consumer.close(Duration.ofSeconds(1));
                return;
            }
            catch (InterruptException e) {
                return;
            }
            catch (AuthenticationException | AuthorizationException e) {
                log.error("Unrecoverable exception when polling Kafka consumer, will exit consumer Thread", e);
                break;
            }
            catch (KafkaException e) {
                log.error("Exception when polling Kafka consumer, will retry in 30 seconds", e);
                try {
                    // noinspection BusyWait
                    Thread.sleep(CONSUMER_ERROR_WAIT_MILLIS);
                }
                catch (InterruptedException ie) {
                    break;
                }
            }
        }

        // clear interrupt flag, in an IntelliJ friendly way :-)
        // NOTE(review): Thread.interrupted() clears the flag, so the second operand is always true and
        // close() is always invoked here - confirm whether skipping close() on interrupt was intended.
        boolean interrupted = Thread.interrupted();
        if (!interrupted || !Thread.currentThread().isInterrupted()) {
            consumer.close(Duration.ofSeconds(1));
        }
    }
}
| 8,391 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicConfigEntryImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/TopicConfigEntryImpl.java | package com.hermesworld.ais.galapagos.kafka.impl;
import org.apache.kafka.clients.admin.ConfigEntry;
import com.hermesworld.ais.galapagos.kafka.TopicConfigEntry;
/**
 * Implementation of the {@link TopicConfigEntry} interface which just wraps a Kafka {@link ConfigEntry} object and
 * delegates all calls to the wrapped object.
 * <p>
 * Instances are immutable (the wrapped entry is never reassigned).
 *
 * @author AlbrechtFlo
 *
 */
final class TopicConfigEntryImpl implements TopicConfigEntry {

    // The wrapped Kafka config entry. final: this wrapper is a pure immutable delegate.
    private final ConfigEntry entry;

    /**
     * Creates a new wrapper delegating all calls to the given Kafka config entry.
     *
     * @param kafkaConfigEntry The Kafka config entry to wrap.
     */
    public TopicConfigEntryImpl(ConfigEntry kafkaConfigEntry) {
        this.entry = kafkaConfigEntry;
    }

    /** @return the name of the config entry, as reported by Kafka. */
    @Override
    public String getName() {
        return entry.name();
    }

    /** @return the value of the config entry, as reported by Kafka. */
    @Override
    public String getValue() {
        return entry.value();
    }

    /** @return {@code true} if the entry still has its default value. */
    @Override
    public boolean isDefault() {
        return entry.isDefault();
    }

    /** @return {@code true} if the entry's value is sensitive and must not be displayed. */
    @Override
    public boolean isSensitive() {
        return entry.isSensitive();
    }

    /** @return {@code true} if the entry cannot be modified. */
    @Override
    public boolean isReadOnly() {
        return entry.isReadOnly();
    }
}
| 1,004 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaRepositoryContainer.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaRepositoryContainer.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
public interface KafkaRepositoryContainer {

    /**
     * Creates and registers a topic-based repository for the given logical topic name and value type.
     *
     * @param topicName  Logical (unprefixed) name of the metadata topic.
     * @param valueClass Class of the values stored in the repository.
     *
     * @return The newly created repository.
     */
    <T extends HasKey> TopicBasedRepository<T> addRepository(String topicName, Class<T> valueClass);

}
| 321 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaConnectionManager.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaConnectionManager.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaSender;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
@Slf4j
class KafkaConnectionManager {

    // One AdminClient per environment ID.
    private final Map<String, AdminClient> adminClients = new HashMap<>();

    // One Kafka sender (producer wrapper) per environment ID.
    private final Map<String, KafkaSenderImpl> senders = new HashMap<>();

    // One consumer factory per environment ID; each factory creates a fresh consumer per call.
    private final Map<String, KafkaConsumerFactory<String, String>> consumerFactories = new HashMap<>();

    private final KafkaFutureDecoupler futureDecoupler;

    // Optional request timeout (ms) applied to all AdminClients; null means Kafka defaults.
    private final Long adminClientRequestTimeout;

    /**
     * Eagerly builds an AdminClient, a sender, and a consumer factory for every configured environment, using the
     * environment's authentication module to supply connection security properties.
     */
    public KafkaConnectionManager(List<KafkaEnvironmentConfig> environments,
            Map<String, KafkaAuthenticationModule> authenticationModules, KafkaFutureDecoupler futureDecoupler,
            Long adminClientRequestTimeout) {
        this.futureDecoupler = futureDecoupler;
        this.adminClientRequestTimeout = adminClientRequestTimeout;
        for (KafkaEnvironmentConfig env : environments) {
            String id = env.getId();
            log.debug("Creating Kafka Connections for " + id);
            KafkaAuthenticationModule authModule = authenticationModules.get(id);
            adminClients.put(id, buildAdminClient(env, authModule));
            senders.put(id, buildKafkaSender(env, authModule));
            consumerFactories.put(id, () -> buildConsumer(env, authModule));
        }
    }

    /**
     * Closes all AdminClients and clears the connection registries.
     */
    public void dispose() {
        adminClients.values().forEach(Admin::close);
        adminClients.clear();
        // Senders have no resources to close here
        senders.clear();
    }

    public Set<String> getEnvironmentIds() {
        return Collections.unmodifiableSet(adminClients.keySet());
    }

    /**
     * NOTE(review): currently a stub - always returns an empty map (see TODO below).
     */
    public CompletableFuture<Map<String, Boolean>> getBrokerOnlineState(String environmentId) {
        AdminClient client = adminClients.get(environmentId);
        if (client == null) {
            return CompletableFuture.completedFuture(Collections.emptyMap());
        }

        // TODO implement
        return CompletableFuture.completedFuture(Collections.emptyMap());
    }

    public AdminClient getAdminClient(String environmentId) {
        return adminClients.get(environmentId);
    }

    public KafkaSender getKafkaSender(String environmentId) {
        return senders.get(environmentId);
    }

    public KafkaConsumerFactory<String, String> getConsumerFactory(String environmentId) {
        return consumerFactories.get(environmentId);
    }

    // Builds the AdminClient for an environment; applies the configured request / API / idle timeouts if set.
    private AdminClient buildAdminClient(KafkaEnvironmentConfig environment,
            KafkaAuthenticationModule authenticationModule) {
        Properties props = buildKafkaProperties(environment, authenticationModule);
        if (adminClientRequestTimeout != null) {
            props.setProperty(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(adminClientRequestTimeout));
            props.setProperty(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG,
                    String.valueOf(adminClientRequestTimeout));
            props.setProperty(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG,
                    String.valueOf(adminClientRequestTimeout));
        }
        return AdminClient.create(props);
    }

    // Builds a fresh consumer with a unique, throwaway consumer group and offsets reset to "earliest".
    private KafkaConsumer<String, String> buildConsumer(KafkaEnvironmentConfig environment,
            KafkaAuthenticationModule authenticationModule) {
        Properties props = buildKafkaProperties(environment, authenticationModule);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "galapagos." + UUID.randomUUID());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // consumer_offset is irrelevant for us, as we use a new consumer group ID in every Galapagos instance
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "10000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new KafkaConsumer<>(props);
    }

    // Builds the sender for an environment: no batching and one in-flight request at a time.
    private KafkaSenderImpl buildKafkaSender(KafkaEnvironmentConfig environment,
            KafkaAuthenticationModule authenticationModule) {
        Properties props = buildKafkaProperties(environment, authenticationModule);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, "1"); // do not batch
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");

        ProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(toMap(props));
        return new KafkaSenderImpl(new KafkaTemplate<>(factory), futureDecoupler);
    }

    // Common connection properties: bootstrap servers, reconnect back-off, plus whatever the auth module requires.
    private Properties buildKafkaProperties(KafkaEnvironmentConfig environment,
            KafkaAuthenticationModule authenticationModule) {
        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, environment.getBootstrapServers());
        props.put(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG, "60000");
        authenticationModule.addRequiredKafkaProperties(props);
        return props;
    }

    // Converts Properties into the String->Object map required by DefaultKafkaProducerFactory.
    private static Map<String, Object> toMap(Properties props) {
        return props.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue));
    }
}
| 6,442 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicBasedRepositoryImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/TopicBasedRepositoryImpl.java | package com.hermesworld.ais.galapagos.kafka.impl;
import java.io.IOException;
import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.kafka.KafkaSender;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.LoggerFactory;
/**
 * Repository storing its objects as JSON messages on a (compacted) Kafka topic, while keeping an in-memory cache for
 * reads. Message format: {@code {"obj": {...}}} for stored objects, {@code {"deleted": true}} as deletion marker.
 */
final class TopicBasedRepositoryImpl<T extends HasKey> implements TopicBasedRepository<T> {

    // TODO currently, if T is a mutable type, callers could modify our internal storage from outside.
    // The internal map should store the JSON object representation instead and freshly generate new
    // objects on the fly. This could also imply to use a custom Iterator in getObjects() for performance
    // optimization.

    // Logical (unprefixed) topic name, as exposed via getTopicName().
    private final String topicName;

    // Full Kafka topic name (prefix + logical name) used for sending and message filtering.
    private final String kafkaTopicName;

    private final Class<T> valueClass;

    // In-memory cache of all objects, keyed by their HasKey.key().
    private final Map<String, T> data = new ConcurrentHashMap<>();

    private final ObjectMapper objectMapper = JsonUtil.newObjectMapper();

    private final KafkaSender sender;

    // Optional hook invoked on every received message; used by waitForInitialization().
    private final AtomicReference<Runnable> messageReceivedHook = new AtomicReference<>();

    /**
     * Creates a new repository. All parameters are required; nulls are rejected fail-fast.
     */
    public TopicBasedRepositoryImpl(String kafkaTopicName, String topicName, Class<T> valueClass, KafkaSender sender) {
        // fail-fast for null values
        if (kafkaTopicName == null) {
            throw new IllegalArgumentException("kafkaTopicName must not be null");
        }
        if (topicName == null) {
            throw new IllegalArgumentException("topicName must not be null");
        }
        if (valueClass == null) {
            throw new IllegalArgumentException("valueClass must not be null");
        }
        if (sender == null) {
            throw new IllegalArgumentException("sender must not be null");
        }
        this.kafkaTopicName = kafkaTopicName;
        this.topicName = topicName;
        this.valueClass = valueClass;
        this.sender = sender;
    }

    /**
     * Callback invoked by the container's consumer thread for every record. Messages for other topics are ignored;
     * a {@code "deleted": true} message removes the key from the cache, otherwise the payload under {@code "obj"} is
     * deserialized into the value class. Parse errors are logged and the message is skipped.
     */
    public final void messageReceived(String topicName, String messageKey, String message) {
        if (!this.kafkaTopicName.equals(topicName)) {
            return;
        }

        // notify waitForInitialization() listeners, if any
        Runnable r = messageReceivedHook.get();
        if (r != null) {
            r.run();
        }

        JSONObject obj = new JSONObject(message);
        if (obj.optBoolean("deleted")) {
            data.remove(messageKey);
            return;
        }
        try {
            data.put(messageKey, objectMapper.readValue(obj.getJSONObject("obj").toString(), valueClass));
        }
        catch (JSONException | IOException e) {
            LoggerFactory.getLogger(getClass()).error("Could not parse object from Kafka message", e);
        }
    }

    @Override
    public final Class<T> getValueClass() {
        return valueClass;
    }

    @Override
    public final String getTopicName() {
        return topicName;
    }

    @Override
    public final boolean containsObject(String id) {
        return data.containsKey(id);
    }

    @Override
    public final Optional<T> getObject(String id) {
        return Optional.ofNullable(data.get(id));
    }

    @Override
    public final Collection<T> getObjects() {
        return Collections.unmodifiableCollection(data.values());
    }

    /**
     * Serializes the value under {@code "obj"} and sends it to the Kafka topic. The local cache is updated
     * immediately, before the send completes.
     * NOTE(review): if the send fails, the local cache and the topic diverge - confirm this is acceptable.
     */
    @Override
    public CompletableFuture<Void> save(T value) {
        try {
            JSONObject message = new JSONObject();
            message.put("obj", new JSONObject(objectMapper.writeValueAsString(value)));
            String key = value.key();
            data.put(key, value);
            return sender.send(kafkaTopicName, key, message.toString());
        }
        catch (JSONException | JsonProcessingException e) {
            return CompletableFuture.failedFuture(e);
        }
    }

    /**
     * Removes the value from the local cache and writes a {@code {"deleted": true}} marker message to the topic.
     */
    @Override
    public CompletableFuture<Void> delete(T value) {
        JSONObject message = new JSONObject();
        message.put("deleted", true);
        String key = value.key();
        data.remove(key);
        return sender.send(kafkaTopicName, key, message.toString());
    }

    /**
     * Completes once the repository is considered initialized: after {@code initialWaitTime}, a hook is installed
     * which restarts an idle timer on every received message; when {@code idleTime} passes without any message, the
     * returned future completes (i.e. the topic's backlog has been fully replayed).
     */
    @Override
    public CompletableFuture<Void> waitForInitialization(Duration initialWaitTime, Duration idleTime,
            ScheduledExecutorService executorService) {
        CompletableFuture<Void> result = new CompletableFuture<>();

        AtomicReference<ScheduledFuture<?>> idleFuture = new AtomicReference<>();

        // uninstalls the hook and completes the returned future
        Runnable completed = () -> {
            messageReceivedHook.set(null);
            result.complete(null);
        };

        // cancels any pending completion and re-schedules it idleTime from now
        Runnable restartIdleTimer = () -> {
            ScheduledFuture<?> f = idleFuture.get();
            if (f != null) {
                f.cancel(false);
            }
            idleFuture.set(executorService.schedule(completed, idleTime.toMillis(), TimeUnit.MILLISECONDS));
        };

        Runnable r = () -> {
            restartIdleTimer.run();
            messageReceivedHook.set(restartIdleTimer);
        };

        executorService.schedule(r, initialWaitTime.toMillis(), TimeUnit.MILLISECONDS);

        return result;
    }
}
| 5,427 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConnectedKafkaClusters.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/ConnectedKafkaClusters.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusterAdminClient;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.util.LoggingAdminClient;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
import org.springframework.util.ObjectUtils;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
public class ConnectedKafkaClusters implements KafkaClusters {
private List<KafkaEnvironmentConfig> environmentMetadata;
private final Map<String, ConnectedKafkaCluster> clusters = new ConcurrentHashMap<>();
private final String productionEnvironmentId;
private final Map<String, KafkaAuthenticationModule> authenticationModules;
private final Set<KafkaRepositoryContainerImpl> repoContainers = new HashSet<>();
private final KafkaConnectionManager connectionManager;
public ConnectedKafkaClusters(List<KafkaEnvironmentConfig> environmentMetadata,
Map<String, KafkaAuthenticationModule> authenticationModules, String productionEnvironmentId,
String galapagosInternalPrefix, KafkaExecutorFactory executorFactory, int topicRepositoryReplicationFactor,
boolean logging, Long adminClientRequestTimeout) {
this.environmentMetadata = environmentMetadata;
this.productionEnvironmentId = productionEnvironmentId;
this.authenticationModules = authenticationModules;
KafkaFutureDecoupler futureDecoupler = new KafkaFutureDecoupler(executorFactory);
this.connectionManager = new KafkaConnectionManager(environmentMetadata, authenticationModules, futureDecoupler,
adminClientRequestTimeout);
for (KafkaEnvironmentConfig envMeta : environmentMetadata) {
KafkaRepositoryContainerImpl repoContainer = new KafkaRepositoryContainerImpl(connectionManager,
envMeta.getId(), galapagosInternalPrefix, topicRepositoryReplicationFactor);
ConnectedKafkaCluster cluster = buildConnectedKafkaCluster(envMeta.getId(), connectionManager,
repoContainer, futureDecoupler, logging);
clusters.put(envMeta.getId(), cluster);
repoContainers.add(repoContainer);
}
}
@Override
public void dispose() {
connectionManager.dispose();
repoContainers.forEach(KafkaRepositoryContainerImpl::dispose);
clusters.clear();
environmentMetadata = Collections.emptyList();
}
@Override
public List<KafkaEnvironmentConfig> getEnvironmentsMetadata() {
return environmentMetadata;
}
@Override
public Optional<KafkaEnvironmentConfig> getEnvironmentMetadata(String environmentId) {
// TODO a map would be more optimized
return environmentMetadata.stream().filter(env -> environmentId.equals(env.getId())).findFirst();
}
@Override
public List<String> getEnvironmentIds() {
return environmentMetadata.stream().map(KafkaEnvironmentConfig::getId).collect(Collectors.toList());
}
    /**
     * Returns the ID of the environment configured as the production environment.
     */
    @Override
    public String getProductionEnvironmentId() {
        return productionEnvironmentId;
    }
@Override
public Optional<KafkaCluster> getEnvironment(String environmentId) {
if (ObjectUtils.isEmpty(environmentId)) {
return Optional.empty();
}
return Optional.ofNullable(clusters.get(environmentId));
}
@Override
public <T extends HasKey> TopicBasedRepository<T> getGlobalRepository(String topicName, Class<T> valueClass) {
KafkaCluster cluster = getEnvironment(getProductionEnvironmentId()).orElse(null);
if (cluster == null) {
throw new RuntimeException("Internal error: No Kafka cluster instance for production environment found");
}
return cluster.getRepository(topicName, valueClass);
}
    /**
     * Returns the authentication module configured for the given environment, or an empty Optional if there is none.
     */
    @Override
    public Optional<KafkaAuthenticationModule> getAuthenticationModule(String environmentId) {
        return Optional.ofNullable(authenticationModules.get(environmentId));
    }
private static ConnectedKafkaCluster buildConnectedKafkaCluster(String environmentId,
KafkaConnectionManager connectionManager, KafkaRepositoryContainer repositoryContainer,
KafkaFutureDecoupler futureDecoupler, boolean logging) {
KafkaClusterAdminClient adminClient = new DefaultKafkaClusterAdminClient(
connectionManager.getAdminClient(environmentId));
if (logging) {
adminClient = new LoggingAdminClient(environmentId, adminClient);
}
return new ConnectedKafkaCluster(environmentId, repositoryContainer, adminClient,
connectionManager.getConsumerFactory(environmentId), futureDecoupler);
}
}
| 5,171 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DefaultKafkaClusterAdminClient.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/DefaultKafkaClusterAdminClient.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaClusterAdminClient;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Default implementation of {@link KafkaClusterAdminClient}, delegating every operation to a Kafka {@link Admin}
 * client and unwrapping the single-element results from Kafka's batch-oriented Admin API.
 */
public class DefaultKafkaClusterAdminClient implements KafkaClusterAdminClient {

    private final Admin kafkaAdmin;

    public DefaultKafkaClusterAdminClient(Admin admin) {
        this.kafkaAdmin = admin;
    }

    @Override
    public KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters) {
        return kafkaAdmin.deleteAcls(filters).all();
    }

    @Override
    public KafkaFuture<Void> createAcls(Collection<AclBinding> bindings) {
        return kafkaAdmin.createAcls(bindings).all();
    }

    @Override
    public KafkaFuture<Collection<AclBinding>> describeAcls(AclBindingFilter filter) {
        return kafkaAdmin.describeAcls(filter).values();
    }

    @Override
    public KafkaFuture<Void> createTopic(NewTopic topic) {
        return kafkaAdmin.createTopics(List.of(topic)).all();
    }

    @Override
    public KafkaFuture<Void> deleteTopic(String topicName) {
        return kafkaAdmin.deleteTopics(List.of(topicName)).all();
    }

    @Override
    public KafkaFuture<Config> describeConfigs(ConfigResource resource) {
        // The Admin API returns one future per requested resource; fall back to an empty Config if none is present.
        KafkaFuture<Config> emptyConfig = KafkaFuture.completedFuture(new Config(List.of()));
        return kafkaAdmin.describeConfigs(List.of(resource)).values().getOrDefault(resource, emptyConfig);
    }

    @Override
    public KafkaFuture<Collection<Node>> describeCluster() {
        return kafkaAdmin.describeCluster().nodes();
    }

    @Override
    public KafkaFuture<TopicDescription> describeTopic(String topicName) {
        return kafkaAdmin.describeTopics(List.of(topicName)).topicNameValues().get(topicName);
    }

    @Override
    public KafkaFuture<Void> incrementalAlterConfigs(ConfigResource resource, Map<String, String> configValues) {
        // a null config value means "reset this config to its default" (DELETE operation)
        List<AlterConfigOp> alterOps = configValues.entrySet().stream()
                .map(entry -> entry.getValue() == null
                        ? new AlterConfigOp(new ConfigEntry(entry.getKey(), null), AlterConfigOp.OpType.DELETE)
                        : new AlterConfigOp(new ConfigEntry(entry.getKey(), entry.getValue()),
                                AlterConfigOp.OpType.SET))
                .toList();
        return kafkaAdmin.incrementalAlterConfigs(Map.of(resource, alterOps)).all();
    }
}
| 2,622 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaSenderImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaSenderImpl.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaSender;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.kafka.core.KafkaTemplate;
import java.util.concurrent.CompletableFuture;
/**
 * Wraps a KafkaTemplate to make concatenated Futures Thread-safe.
 *
 * @author AlbrechtFlo
 */
public class KafkaSenderImpl implements KafkaSender {

    private final KafkaTemplate<String, String> kafkaTemplate;

    private final KafkaFutureDecoupler futureDecoupler;

    public KafkaSenderImpl(KafkaTemplate<String, String> template, KafkaFutureDecoupler futureDecoupler) {
        this.kafkaTemplate = template;
        this.futureDecoupler = futureDecoupler;
    }

    /**
     * Sends the given message with the given key to the given topic.
     *
     * @return A future which completes when the send operation completes, decoupled from the Kafka sender thread.
     */
    @Override
    public CompletableFuture<Void> send(String topic, String key, String message) {
        ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, key, message);
        return futureDecoupler.toCompletableFuture(kafkaTemplate.send(producerRecord)).thenApply(result -> null);
    }
}
| 1,007 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaConsumerFactory.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaConsumerFactory.java | package com.hermesworld.ais.galapagos.kafka.impl;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
/**
 * Factory interface for creating new {@link KafkaConsumer} instances.
 *
 * @param <K> Type of the record keys.
 * @param <V> Type of the record values.
 */
public interface KafkaConsumerFactory<K, V> {

    /**
     * Creates a new Kafka consumer.
     *
     * @return A new Kafka consumer, never <code>null</code>.
     *
     * @throws KafkaException If the consumer could not be created.
     */
    KafkaConsumer<K, V> newConsumer() throws KafkaException;
}
| 266 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaFutureDecoupler.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaFutureDecoupler.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory;
import org.apache.kafka.common.KafkaFuture;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
/**
 * Helper class which decouples the completion of {@link KafkaFuture} or {@link CompletableFuture} instances from the
 * main Kafka Thread.
 *
 * @author AlbrechtFlo
 */
public class KafkaFutureDecoupler {

    // Provides a fresh, single-use executor for each future which has to be decoupled.
    private final KafkaExecutorFactory executorFactory;

    public KafkaFutureDecoupler(KafkaExecutorFactory executorFactory) {
        this.executorFactory = executorFactory;
    }

    /**
     * Returns a {@link CompletableFuture} which completes when the given {@link KafkaFuture} completes. If the
     * <code>KafkaFuture</code> is already complete, a completed Future is returned. Otherwise, the returned Future
     * completes on a Thread provided by a fresh <code>ExecutorService</code> of the <code>KafkaExecutorFactory</code>
     * provided for this helper class.
     *
     * @param <T> Type of the value provided by the Future.
     * @param future Future which may be complete, or which may complete on the Kafka Thread.
     *
     * @return A completable Future which may be already complete if the original Future already was complete, or which
     *         completes on a Thread decoupled from the Kafka Thread.
     */
    public <T> CompletableFuture<T> toCompletableFuture(KafkaFuture<T> future) {
        return decouple(kafkaFutureToCompletableFuture(future));
    }

    /**
     * Returns a {@link CompletableFuture} which completes when the given {@link CompletableFuture} completes. If the
     * <code>CompletableFuture</code> is already complete, a completed Future is returned. Otherwise, the returned
     * Future completes on a Thread provided by a fresh <code>ExecutorService</code> of the
     * <code>KafkaExecutorFactory</code> provided for this helper class.
     *
     * @param <T> Type of the value provided by the Future.
     * @param completableFuture Future which may be complete, or which may complete on the Kafka Thread.
     *
     * @return A completable Future which may be already complete if the original Future already was complete, or which
     *         completes on a Thread decoupled from the Kafka Thread.
     */
    public <T> CompletableFuture<T> toCompletableFuture(CompletableFuture<T> completableFuture) {
        return decouple(completableFuture);
    }

    // Core decoupling: hands the completion of the given future over to a fresh executor thread.
    private <T> CompletableFuture<T> decouple(CompletableFuture<T> completableFuture) {
        // already complete: no decoupling needed, callbacks added later run on the caller's thread anyway
        if (completableFuture.isDone()) {
            return completableFuture;
        }
        CompletableFuture<T> result = new CompletableFuture<>();
        // fresh single-use executor; shut down right after the single completion task has been submitted
        ExecutorService executor = executorFactory.newExecutor();
        completableFuture.whenComplete((res, throwable) -> {
            try {
                executor.submit(() -> {
                    if (throwable != null) {
                        result.completeExceptionally(throwable);
                    }
                    else {
                        result.complete(res);
                    }
                });
            }
            finally {
                // shutdown() still executes the already-submitted task, but releases the executor afterwards
                executor.shutdown();
            }
        });
        return result;
    }

    // Bridges a KafkaFuture to a CompletableFuture; the result completes on whatever thread the KafkaFuture
    // completes on (decoupling happens separately in decouple()).
    private <T> CompletableFuture<T> kafkaFutureToCompletableFuture(KafkaFuture<T> future) {
        CompletableFuture<T> result = new CompletableFuture<>();
        future.whenComplete((res, t) -> {
            if (t != null) {
                result.completeExceptionally(t);
            }
            else {
                result.complete(res);
            }
        });
        return result;
    }
}
| 3,725 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CreateAuthenticationResult.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/auth/CreateAuthenticationResult.java | package com.hermesworld.ais.galapagos.kafka.auth;
import lombok.Getter;
import org.json.JSONObject;
/**
 * Result of creating (or updating) authentication data, consisting of a public part (e.g. metadata stored by
 * Galapagos) and a private part (the secret, e.g. a password or private key) which is only handed out to the caller.
 */
public class CreateAuthenticationResult {

    // Public part of the authentication data. NOTE(review): JSONObject is mutable; callers share this instance.
    @Getter
    private final JSONObject publicAuthenticationData;

    // Private (secret) part of the authentication data; defensively copied on both write and read.
    private final byte[] privateAuthenticationData;

    public CreateAuthenticationResult(JSONObject publicAuthenticationData, byte[] privateAuthenticationData) {
        this.publicAuthenticationData = publicAuthenticationData;
        // defensive copy, so later modifications of the caller's array do not affect this object
        this.privateAuthenticationData = privateAuthenticationData.clone();
    }

    /**
     * Returns a copy of the private authentication data, so callers cannot modify the internally stored secret.
     * (The previous Lombok-generated getter exposed the internal array directly, defeating the constructor's
     * defensive copy.)
     *
     * @return A fresh copy of the private authentication data.
     */
    public byte[] getPrivateAuthenticationData() {
        return privateAuthenticationData.clone();
    }
}
| 673 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaAuthenticationModule.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/auth/KafkaAuthenticationModule.java | package com.hermesworld.ais.galapagos.kafka.auth;
import org.json.JSONException;
import org.json.JSONObject;
import javax.annotation.CheckReturnValue;
import java.time.Instant;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
/**
 * Interface of modules being able to provide (create, delete) authentication data (e.g. user name / password) for
 * access to a Kafka Cluster. Each {@link com.hermesworld.ais.galapagos.kafka.KafkaCluster} provides one authentication
 * module, based on its configuration.
 */
public interface KafkaAuthenticationModule {

    /**
     * Initializes this module. Must complete before other operations of this module are used.
     *
     * @return A future which completes when initialization is done.
     */
    @CheckReturnValue
    CompletableFuture<Void> init();

    /**
     * Creates new authentication data for the given application.
     *
     * @param applicationId             ID of the application.
     * @param applicationNormalizedName Normalized name of the application (module-specific use - presumably for
     *                                  generated user names; confirm with implementations).
     * @param createParameters          Module-specific creation parameters.
     *
     * @return A future which completes with the created authentication data.
     */
    @CheckReturnValue
    CompletableFuture<CreateAuthenticationResult> createApplicationAuthentication(String applicationId,
            String applicationNormalizedName, JSONObject createParameters);

    /**
     * Updates (replaces) existing authentication data for the given application.
     *
     * @param existingAuthData Authentication data previously created by this module.
     *
     * @return A future which completes with the new authentication data.
     */
    @CheckReturnValue
    CompletableFuture<CreateAuthenticationResult> updateApplicationAuthentication(String applicationId,
            String applicationNormalizedName, JSONObject createParameters, JSONObject existingAuthData);

    /**
     * Deletes the authentication data of the given application.
     *
     * @param existingAuthData Authentication data previously created by this module.
     *
     * @return A future which completes when the authentication data has been deleted.
     */
    @CheckReturnValue
    CompletableFuture<Void> deleteApplicationAuthentication(String applicationId, JSONObject existingAuthData);

    /**
     * Adds the Kafka client properties required for this authentication mechanism to the given properties object.
     */
    void addRequiredKafkaProperties(Properties kafkaProperties);

    /**
     * Returns the Kafka username which represents the given application or developer from the given authentication data
     * which have been created by this module. The return value <b>must</b> include the <code>User:</code> prefix.
     *
     * @param existingAuthData Authentication data stored for the application or developer, which have been created by
     *                         this module.
     *
     * @return The Kafka username for the given application or developer, never <code>null</code>.
     *
     * @throws JSONException If authentication data could not be parsed, or if the username could not be determined from
     *                       the authentication data.
     */
    String extractKafkaUserName(JSONObject existingAuthData) throws JSONException;

    /**
     * Returns the expiry date of the given authentication data, if any.
     *
     * @param existingAuthData Authentication data previously created by this module.
     *
     * @return The expiry date, or an empty Optional if the authentication data does not expire.
     *
     * @throws JSONException If the authentication data could not be parsed.
     */
    Optional<Instant> extractExpiryDate(JSONObject existingAuthData) throws JSONException;

    /**
     * Creates new authentication data for the given developer (personal access).
     *
     * @param userName     Name of the developer (user).
     * @param createParams Module-specific creation parameters.
     *
     * @return A future which completes with the created authentication data.
     */
    @CheckReturnValue
    CompletableFuture<CreateAuthenticationResult> createDeveloperAuthentication(String userName,
            JSONObject createParams);

    /**
     * Deletes the authentication data of the given developer.
     *
     * @param existingAuthData Authentication data previously created by this module.
     *
     * @return A future which completes when the authentication data has been deleted.
     */
    @CheckReturnValue
    CompletableFuture<Void> deleteDeveloperAuthentication(String userName, JSONObject existingAuthData);
}
| 2,483 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
InitPerCluster.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/util/InitPerCluster.java | package com.hermesworld.ais.galapagos.kafka.util;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
/**
 * Interface for components which want to "init" something per connected Kafka Cluster on startup. Usually, this means
 * getting their required topic based repository for the first time, so the repository starts receiving async data and
 * is caught by the startup listener which waits for all repositories until they have received initial data.
 */
public interface InitPerCluster {

    /**
     * Called during Galapagos startup, once for each connected Kafka cluster.
     *
     * @param cluster Cluster to perform startup initialization on (e.g., initialize a repository via
     *                <code>cluster.getRepository(...)</code>).
     */
    void init(KafkaCluster cluster);
}
| 741 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
AclSupport.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/util/AclSupport.java | package com.hermesworld.ais.galapagos.kafka.util;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentsConfig;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Singleton Component helping with recurring tasks regarding Kafka ACLs; mainly, calculating required ACLs for given
 * applications and environments.
 */
@Component
public class AclSupport {

    /** Operations granted on a topic for read-only access. */
    private static final List<AclOperationAndType> READ_TOPIC_OPERATIONS = Arrays.asList(allow(AclOperation.READ),
            allow(AclOperation.DESCRIBE_CONFIGS));

    /** Operations granted on an owned / produced topic: ALL, but explicitly denying topic deletion. */
    private static final List<AclOperationAndType> WRITE_TOPIC_OPERATIONS = Arrays.asList(allow(AclOperation.ALL),
            denyDelete());

    private final KafkaEnvironmentsConfig kafkaConfig;

    private final TopicService topicService;

    private final SubscriptionService subscriptionService;

    public AclSupport(KafkaEnvironmentsConfig kafkaConfig, TopicService topicService,
            SubscriptionService subscriptionService) {
        this.kafkaConfig = kafkaConfig;
        this.topicService = topicService;
        this.subscriptionService = subscriptionService;
    }

    /**
     * Calculates the full set of ACL bindings an application requires on the given environment. This covers configured
     * default ACLs, prefix-based ACLs for internal topics / transaction IDs / consumer groups, ACLs for topics owned
     * or produced by the application, and ACLs for topics the application has subscribed.
     *
     * @param environmentId       ID of the Kafka environment.
     * @param applicationMetadata Metadata of the application (prefixes, application ID).
     * @param kafkaUserName       Kafka user name (principal) of the application, including <code>User:</code> prefix.
     * @param readOnly            If <code>true</code>, only read-level ACLs are generated (e.g. for expiring
     *                            authentications - TODO confirm intended use with callers).
     *
     * @return The set of required ACL bindings, never <code>null</code>.
     */
    public Collection<AclBinding> getRequiredAclBindings(String environmentId, ApplicationMetadata applicationMetadata,
            String kafkaUserName, boolean readOnly) {
        Set<AclBinding> result = new HashSet<>();

        String applicationId = applicationMetadata.getApplicationId();

        // add configured default ACLs, if any
        if (kafkaConfig.getDefaultAcls() != null) {
            result.addAll(kafkaConfig.getDefaultAcls().stream()
                    .map(acl -> new AclBinding(
                            new ResourcePattern(acl.getResourceType(), acl.getName(), acl.getPatternType()),
                            new AccessControlEntry(kafkaUserName, "*", acl.getOperation(), AclPermissionType.ALLOW)))
                    .collect(Collectors.toList()));
        }

        // prefix ACLs for the application's internal topics; write prefixes only if not read-only
        List<AclOperationAndType> internalTopicOps = readOnly ? READ_TOPIC_OPERATIONS : ALLOW_ALL;
        result.addAll(applicationMetadata.getInternalTopicPrefixes().stream()
                .flatMap(prefix -> prefixAcls(kafkaUserName, ResourceType.TOPIC, prefix, internalTopicOps).stream())
                .collect(Collectors.toList()));

        if (!readOnly) {
            result.addAll(applicationMetadata.getTransactionIdPrefixes().stream().flatMap(
                    prefix -> prefixAcls(kafkaUserName, ResourceType.TRANSACTIONAL_ID, prefix, ALLOW_ALL).stream())
                    .collect(Collectors.toList()));
            result.addAll(applicationMetadata.getConsumerGroupPrefixes().stream()
                    .flatMap(prefix -> prefixAcls(kafkaUserName, ResourceType.GROUP, prefix, ALLOW_ALL).stream())
                    .collect(Collectors.toList()));
        }

        // topics OWNED by the application (or where the application is a registered producer)
        topicService.listTopics(environmentId).stream().filter(
                topic -> topic.getType() != TopicType.INTERNAL && (applicationId.equals(topic.getOwnerApplicationId())
                        || (topic.getProducers() != null && topic.getProducers().contains(applicationId))))
                .map(topic -> topicAcls(kafkaUserName, topic.getName(),
                        readOnly ? READ_TOPIC_OPERATIONS : WRITE_TOPIC_OPERATIONS))
                .forEach(result::addAll);

        // topics SUBSCRIBED by the application; COMMANDS topics are written to by subscribers, not read
        subscriptionService.getSubscriptionsOfApplication(environmentId, applicationId, false).stream()
                .map(sub -> topicAcls(kafkaUserName, sub.getTopicName(),
                        topicService.getTopic(environmentId, sub.getTopicName())
                                .map(t -> (t.getType() == TopicType.COMMANDS && !readOnly) ? WRITE_TOPIC_OPERATIONS
                                        : READ_TOPIC_OPERATIONS)
                                .orElse(Collections.emptyList())))
                .forEach(result::addAll);

        return result;
    }

    /**
     * "Simplifies" (reduces) the given set of ACLs. For example, if there are two identical ACLs, one allows ALL for a
     * resource pattern and principal, and one allows READ for the very same resource pattern and principal, the READ
     * ACL can safely be removed.
     *
     * @param aclBindings Set of ACL Bindings to simplify.
     * @return Simplified (potentially identical) set of ACL Bindings.
     */
    public Collection<AclBinding> simplify(Collection<AclBinding> aclBindings) {
        // collect all (pattern, principal) pairs which already have an ALLOW ALL binding
        Set<ResourcePatternAndPrincipal> allowedAllPatterns = aclBindings.stream()
                .filter(acl -> acl.entry().permissionType() == AclPermissionType.ALLOW
                        && acl.entry().operation() == AclOperation.ALL)
                .map(acl -> new ResourcePatternAndPrincipal(acl.pattern(), acl.entry().principal()))
                .collect(Collectors.toSet());

        if (allowedAllPatterns.isEmpty()) {
            return aclBindings;
        }

        // keep ALL bindings, all DENY bindings, and ALLOW bindings not covered by an ALLOW ALL binding
        return aclBindings.stream()
                .filter(acl -> acl.entry().operation() == AclOperation.ALL
                        || acl.entry().permissionType() == AclPermissionType.DENY
                        || !allowedAllPatterns
                                .contains(new ResourcePatternAndPrincipal(acl.pattern(), acl.entry().principal())))
                .collect(Collectors.toSet());
    }

    /** Builds PREFIXED ACL bindings for the given resource type and prefix. */
    private Collection<AclBinding> prefixAcls(String userName, ResourceType resourceType, String prefix,
            List<AclOperationAndType> ops) {
        return ops.stream().map(op -> op.toBinding(prefix, resourceType, PatternType.PREFIXED, userName))
                .collect(Collectors.toList());
    }

    /** Builds LITERAL ACL bindings for the given topic name. */
    private Collection<AclBinding> topicAcls(String userName, String topicName, List<AclOperationAndType> ops) {
        return ops.stream().map(op -> op.toBinding(topicName, ResourceType.TOPIC, PatternType.LITERAL, userName))
                .collect(Collectors.toList());
    }

    /** Combination of an ACL operation and its permission type (ALLOW / DENY). */
    private record AclOperationAndType(AclOperation operation, AclPermissionType permissionType) {

        /** Builds a concrete ACL binding for the given resource and principal, with host wildcard. */
        public AclBinding toBinding(String resourceName, ResourceType resourceType, PatternType patternType,
                String principal) {
            return new AclBinding(new ResourcePattern(resourceType, resourceName, patternType),
                    new AccessControlEntry(principal, "*", operation, permissionType));
        }
    }

    /** Key combining a resource pattern and a principal, used to detect ACLs made redundant by an ALLOW ALL ACL. */
    private record ResourcePatternAndPrincipal(ResourcePattern resourcePattern, String principal) {
    }

    private static AclOperationAndType allow(AclOperation op) {
        return new AclOperationAndType(op, AclPermissionType.ALLOW);
    }

    private static AclOperationAndType denyDelete() {
        return new AclOperationAndType(AclOperation.DELETE, AclPermissionType.DENY);
    }

    private static final List<AclOperationAndType> ALLOW_ALL = List.of(allow(AclOperation.ALL));
}
| 8,656 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaTopicConfigHelper.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/util/KafkaTopicConfigHelper.java | package com.hermesworld.ais.galapagos.kafka.util;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.TopicConfig;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
 * Helper class for dealing with Kafka Topic Config properties. This class contains lots of information from the Kafka
 * docs, e.g. default values for some properties, and property precedence (e.g. <code>log.retention.hours</code> vs.
 * <code>log.retention.ms</code>...)
 *
 * @author AlbrechtFlo
 *
 */
@SuppressWarnings("deprecation")
public class KafkaTopicConfigHelper {

    /** Topic config key -> default value, broker-level property name, and documentation. Order is significant. */
    private static final Map<String, ConfigValueInfo> CONFIG_INFOS = new LinkedHashMap<>();

    /** Secondary broker-level properties which provide a default when the primary broker property is unset. */
    private static final Map<String, List<SecondaryServerProp>> SECONDARY_SERVER_PROPS = new HashMap<>();

    // Long.MAX_VALUE as String; used by Kafka as "infinite" for several ms based configs.
    private static final String INFINITE_MS = "9223372036854775807";

    // Converts a broker config value given in hours to milliseconds (Long.parseLong avoids needless boxing).
    private static final Function<String, String> hoursToMillis = hours -> String
            .valueOf(TimeUnit.HOURS.toMillis(Long.parseLong(hours)));

    static {
        CONFIG_INFOS.put(TopicConfig.CLEANUP_POLICY_CONFIG,
                new ConfigValueInfo("delete", "log.cleanup.policy", TopicConfig.CLEANUP_POLICY_DOC));
        CONFIG_INFOS.put(TopicConfig.RETENTION_MS_CONFIG,
                new ConfigValueInfo("604800000", "log.retention.ms", TopicConfig.RETENTION_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG,
                new ConfigValueInfo("0", "log.cleaner.min.compaction.lag.ms", TopicConfig.MIN_COMPACTION_LAG_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.DELETE_RETENTION_MS_CONFIG, new ConfigValueInfo("86400000",
                "log.cleaner.delete.retention.ms", TopicConfig.DELETE_RETENTION_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.COMPRESSION_TYPE_CONFIG,
                new ConfigValueInfo("producer", "compression.type", TopicConfig.COMPRESSION_TYPE_DOC));
        CONFIG_INFOS.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG,
                new ConfigValueInfo("60000", "log.segment.delete.delay.ms", TopicConfig.FILE_DELETE_DELAY_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, new ConfigValueInfo(INFINITE_MS,
                "log.flush.interval.messages", TopicConfig.FLUSH_MESSAGES_INTERVAL_DOC));
        CONFIG_INFOS.put(TopicConfig.FLUSH_MS_CONFIG,
                new ConfigValueInfo(INFINITE_MS, "log.flush.interval.ms", TopicConfig.FLUSH_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG,
                new ConfigValueInfo("4096", "log.index.interval.bytes", TopicConfig.INDEX_INTERVAL_BYTES_DOC));
        CONFIG_INFOS.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG,
                new ConfigValueInfo("1000012", "message.max.bytes", TopicConfig.MAX_MESSAGE_BYTES_DOC));
        CONFIG_INFOS.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG,
                new ConfigValueInfo(null, "log.message.format.version", TopicConfig.MESSAGE_FORMAT_VERSION_DOC));
        CONFIG_INFOS.put(TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, new ConfigValueInfo(INFINITE_MS,
                "log.message.timestamp.difference.max.ms", TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, new ConfigValueInfo("CreateTime",
                "log.message.timestamp.type", TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC));
        CONFIG_INFOS.put(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, new ConfigValueInfo("0.5",
                "log.cleaner.min.cleanable.ratio", TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_DOC));
        CONFIG_INFOS.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG,
                new ConfigValueInfo("1", "min.insync.replicas", TopicConfig.MIN_IN_SYNC_REPLICAS_DOC));
        CONFIG_INFOS.put(TopicConfig.PREALLOCATE_CONFIG,
                new ConfigValueInfo("false", "log.preallocate", TopicConfig.PREALLOCATE_DOC));
        CONFIG_INFOS.put(TopicConfig.RETENTION_BYTES_CONFIG,
                new ConfigValueInfo("-1", "log.retention.bytes", TopicConfig.RETENTION_BYTES_DOC));
        CONFIG_INFOS.put(TopicConfig.SEGMENT_BYTES_CONFIG,
                new ConfigValueInfo("1073741824", "log.segment.bytes", TopicConfig.SEGMENT_BYTES_DOC));
        CONFIG_INFOS.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG,
                new ConfigValueInfo("10485760", "log.index.size.max.bytes", TopicConfig.SEGMENT_INDEX_BYTES_DOC));
        CONFIG_INFOS.put(TopicConfig.SEGMENT_JITTER_MS_CONFIG,
                new ConfigValueInfo("0", "log.roll.jitter.ms", TopicConfig.SEGMENT_JITTER_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.SEGMENT_MS_CONFIG,
                new ConfigValueInfo("604800000", "log.roll.ms", TopicConfig.SEGMENT_MS_DOC));
        CONFIG_INFOS.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, new ConfigValueInfo("false",
                "unclean.leader.election.enable", TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_DOC));
        CONFIG_INFOS.put(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, new ConfigValueInfo("true",
                "log.message.downconversion.enable", TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_DOC));

        // broker-level fallback properties, in precedence order (e.g. log.retention.minutes before log.retention.hours)
        SECONDARY_SERVER_PROPS
                .put(TopicConfig.RETENTION_MS_CONFIG,
                        Arrays.asList(
                                new SecondaryServerProp("log.retention.minutes",
                                        minutes -> String
                                                .valueOf(TimeUnit.MINUTES.toMillis(Long.parseLong(minutes)))),
                                new SecondaryServerProp("log.retention.hours", hoursToMillis)));
        SECONDARY_SERVER_PROPS.put(TopicConfig.SEGMENT_MS_CONFIG,
                List.of(new SecondaryServerProp("log.roll.hours", hoursToMillis)));
        SECONDARY_SERVER_PROPS.put(TopicConfig.SEGMENT_JITTER_MS_CONFIG,
                List.of(new SecondaryServerProp("log.roll.jitter.hours", hoursToMillis)));
    }

    private KafkaTopicConfigHelper() {
    }

    /**
     * Returns all known topic config keys together with their Kafka documentation text, in a stable order.
     */
    public static Map<String, String> getConfigKeysAndDescription() {
        Map<String, String> result = new LinkedHashMap<>();
        CONFIG_INFOS.forEach((key, value) -> result.put(key, value.description()));
        return result;
    }

    /**
     * Calculates the effective default value for each known topic config key, based on the given broker config.
     * Broker-level values (including secondary fallback properties, converted to the primary unit) take precedence
     * over the hard-coded Kafka defaults.
     *
     * @param brokerConfig Config of a broker of the target cluster.
     * @return Map from topic config key to its effective default value (values may be <code>null</code>).
     */
    public static Map<String, String> getTopicDefaultValues(Config brokerConfig) {
        Map<String, String> brokerConfigValues = new HashMap<>();
        // have to use forEach instead of Map collector because values could be null
        brokerConfig.entries().forEach(entry -> brokerConfigValues.put(entry.name(), entry.value()));

        Map<String, String> result = new HashMap<>();

        for (Map.Entry<String, ConfigValueInfo> entry : CONFIG_INFOS.entrySet()) {
            String configKey = entry.getKey();
            ConfigValueInfo info = entry.getValue();
            String serverDefault = null;
            if (brokerConfigValues.containsKey(info.serverDefaultProperty())) {
                serverDefault = brokerConfigValues.get(info.serverDefaultProperty());
            }
            // primary broker property unset: try the secondary properties (e.g. hours based variants)
            if (serverDefault == null && SECONDARY_SERVER_PROPS.containsKey(configKey)) {
                for (SecondaryServerProp prop : SECONDARY_SERVER_PROPS.get(configKey)) {
                    if (brokerConfigValues.get(prop.configName()) != null) {
                        serverDefault = prop.apply(brokerConfigValues.get(prop.configName()));
                        break;
                    }
                }
            }
            result.put(configKey, serverDefault == null ? info.defaultValue() : serverDefault);
        }

        return result;
    }

    /** Static info about one topic config key: Kafka default, corresponding broker property, and documentation. */
    private record ConfigValueInfo(String defaultValue, String serverDefaultProperty, String description) {
    }

    /** A broker-level fallback property plus the conversion of its value to the primary property's unit. */
    private record SecondaryServerProp(String configName, Function<String, String> mappingFunction) {

        public String apply(String baseConfigValue) {
            return mappingFunction.apply(baseConfigValue);
        }
    }
}
| 7,956 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
LoggingAdminClient.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/util/LoggingAdminClient.java | package com.hermesworld.ais.galapagos.kafka.util;
import com.hermesworld.ais.galapagos.kafka.KafkaClusterAdminClient;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
/**
 * Decorator for a {@link KafkaClusterAdminClient} which logs every operation, its parameters, and its duration and
 * outcome once the returned future completes.
 */
@Slf4j
public class LoggingAdminClient implements KafkaClusterAdminClient {

    private final String clusterId;

    private final KafkaClusterAdminClient delegate;

    /**
     * @param clusterId ID of the cluster, included in every log message.
     * @param delegate  Admin client to delegate all operations to; must not itself be a LoggingAdminClient.
     */
    public LoggingAdminClient(String clusterId, KafkaClusterAdminClient delegate) {
        if (delegate instanceof LoggingAdminClient) {
            throw new IllegalArgumentException("Cannot create a logging Admin Client on a logging Admin Client");
        }
        this.clusterId = clusterId;
        this.delegate = delegate;
    }

    @Override
    public KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters) {
        return logOperation("deleteAcls", filters, () -> delegate.deleteAcls(filters));
    }

    @Override
    public KafkaFuture<Void> createAcls(Collection<AclBinding> bindings) {
        return logOperation("createAcls", bindings, () -> delegate.createAcls(bindings));
    }

    @Override
    public KafkaFuture<Collection<AclBinding>> describeAcls(AclBindingFilter filter) {
        return logOperation("describeAcls", filter, () -> delegate.describeAcls(filter));
    }

    @Override
    public KafkaFuture<Void> createTopic(NewTopic topic) {
        return logOperation("createTopic", topic, () -> delegate.createTopic(topic));
    }

    @Override
    public KafkaFuture<Void> deleteTopic(String topicName) {
        return logOperation("deleteTopic", topicName, () -> delegate.deleteTopic(topicName));
    }

    @Override
    public KafkaFuture<Config> describeConfigs(ConfigResource resource) {
        return logOperation("describeConfigs", resource, () -> delegate.describeConfigs(resource));
    }

    @Override
    public KafkaFuture<Collection<Node>> describeCluster() {
        return logOperation("describeCluster", "cluster", () -> delegate.describeCluster());
    }

    @Override
    public KafkaFuture<Void> incrementalAlterConfigs(ConfigResource resource, Map<String, String> configValues) {
        // Map.entry instead of Set.of: Set.of has unspecified iteration order, which made the log key
        // non-deterministic ("[configValues, resource]" vs "[resource, configValues]").
        return logOperation("incrementalAlterConfigs", Map.entry(resource, configValues),
                () -> delegate.incrementalAlterConfigs(resource, configValues));
    }

    @Override
    public KafkaFuture<TopicDescription> describeTopic(String topicName) {
        return logOperation("describeTopic", topicName, () -> delegate.describeTopic(topicName));
    }

    /**
     * Logs the start of the operation, invokes it, and attaches a completion handler which logs outcome and duration.
     *
     * @param opText Name of the operation, for logging.
     * @param logKey Parameter object(s) of the operation, for logging.
     * @param future Supplier actually invoking the delegate operation.
     */
    private <T> KafkaFuture<T> logOperation(String opText, Object logKey, Supplier<KafkaFuture<T>> future) {
        long startTime = System.currentTimeMillis();
        log.info("Kafka AdminClient Call on cluster {}: {} ({})", clusterId, opText, logKey);
        return future.get().whenComplete((v, t) -> logFutureComplete(opText, logKey, t, startTime));
    }

    // Logs completion of an operation, including its total duration; logs at ERROR level on failure.
    private void logFutureComplete(String opText, Object logKey, Throwable error, long startTime) {
        long totalTime = System.currentTimeMillis() - startTime;
        if (error != null) {
            log.error("Kafka operation {} for {} FAILED after {} ms", opText, logKey, totalTime, error);
        }
        else {
            log.info("Kafka operation {} for {} COMPLETE after {} ms", opText, logKey, totalTime);
        }
    }
}
| 3,761 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicBasedRepository.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/kafka/util/TopicBasedRepository.java | package com.hermesworld.ais.galapagos.kafka.util;
import java.time.Duration;
import java.util.Collection;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import com.hermesworld.ais.galapagos.util.HasKey;
/**
* Interface for Repositories (logical data stores) based on a Kafka Topic. Implementations will use the JSON
* representation of objects stored in the repository to persist the information to Kafka, so chosen repository value
* types must be serializable as JSON.
*
* @param <T> Type of the objects being stored in this repository. The type must implement the {@link HasKey} interface
* and be serializable as and deserializable from JSON.
*/
/**
 * A logical data store whose persistence layer is a Kafka topic. Implementations persist each object
 * in its JSON representation, so every value type used with this interface must be serializable to
 * and deserializable from JSON, and must implement {@link HasKey} to provide its unique ID.
 *
 * @param <T> Type of the objects stored in this repository.
 */
public interface TopicBasedRepository<T extends HasKey> {

    /**
     * Tells whether this repository currently holds an object with the given ID. An object "exists"
     * when at least one record for it is present on the backing Kafka topic and the latest record is
     * not a deletion marker.
     *
     * @param id ID of the object, as returned by its {@link HasKey#key()} method.
     * @return <code>true</code> if a non-deleted object with this ID exists, <code>false</code> otherwise.
     */
    boolean containsObject(String id);

    /**
     * Looks up the object stored under the given ID.
     *
     * @param id ID of the object, as returned by its {@link HasKey#key()} method.
     * @return An Optional with the object, or an empty Optional if no such object exists.
     */
    Optional<T> getObject(String id);

    /**
     * Retrieves every non-deleted object currently held by this repository.
     *
     * @return A (possibly empty) collection of all stored objects; never <code>null</code>.
     */
    Collection<T> getObjects();

    /**
     * Persists the given object. An already existing object with the same key (see
     * {@link HasKey#key()}) is replaced; the backing Kafka topic is updated accordingly.
     *
     * @param value Object to store.
     * @return A future completing once both the repository and the Kafka topic have been updated, or
     *         completing exceptionally if the Kafka topic could not be updated.
     */
    CompletableFuture<Void> save(T value);

    /**
     * Removes the given object from this repository and updates the backing Kafka topic. The lookup
     * is performed by the object's <i>key</i> (not via <code>equals()</code>); if no object with that
     * key exists, this is a no-op.
     *
     * @param value Object to remove.
     * @return A future completing once both the repository and the Kafka topic have been updated, or
     *         completing exceptionally if the Kafka topic could not be updated.
     */
    CompletableFuture<Void> delete(T value);

    /**
     * Returns the short name of the Kafka topic backing this repository. The full topic name is this
     * value prefixed with the configured Galapagos metadata topic prefix.
     *
     * @return The short backing topic name; never <code>null</code>.
     */
    String getTopicName();

    /**
     * Returns the class of the objects stored in this repository.
     *
     * @return The value class of this repository.
     */
    Class<T> getValueClass();

    /**
     * Waits until this repository is "initialized", i.e. has received its initial data from the Kafka
     * cluster. As Kafka data arrives as a stream, initialization can only be approximated via two
     * durations: an initial period to wait unconditionally, and an "idle" period - if no further
     * records arrive within it, initialization is considered complete and the returned future
     * completes. Note that for an empty topic, the total wait is still
     * <code>initialWaitTime + idleTime</code>, so choose both values carefully.
     *
     * @param initialWaitTime Period to wait regardless of incoming messages.
     * @param idleTime        Maximum quiet period after which initialization counts as complete.
     * @param executorService Executor for the scheduled waiting tasks; the returned future also
     *                        completes on a thread of this executor (<b>never</b> on the caller's thread).
     * @return A future completing once the repository counts as initialized, <b>always</b> on a
     *         thread of the given <code>executorService</code>.
     */
    CompletableFuture<Void> waitForInitialization(Duration initialWaitTime, Duration idleTime,
            ScheduledExecutorService executorService);
}
| 5,372 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DevAuthenticationMetadata.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/DevAuthenticationMetadata.java | package com.hermesworld.ais.galapagos.devauth;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.Getter;
import lombok.Setter;
import java.util.Objects;
/**
 * Metadata about a single developer authentication (e.g. a personal certificate or API key) on one
 * Kafka environment. The authentication details are stored as a JSON string whose structure is
 * determined by the environment's authentication module.
 */
@JsonSerialize
@Getter
@Setter
public class DevAuthenticationMetadata implements HasKey {

    // Galapagos user name owning this authentication; also serves as the repository key.
    private String userName;

    // JSON describing the authentication, as produced by the environment's authentication module.
    private String authenticationJson;

    @Override
    public String key() {
        return userName;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        // Strict class comparison (not instanceof), so subclass instances never compare equal.
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        DevAuthenticationMetadata other = (DevAuthenticationMetadata) o;
        return Objects.equals(other.userName, userName)
                && Objects.equals(other.authenticationJson, authenticationJson);
    }

    @Override
    public int hashCode() {
        // Hashing only the user name is consistent with equals (equal objects share the user name).
        return Objects.hash(userName);
    }
}
| 961 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperAuthenticationService.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/DeveloperAuthenticationService.java | package com.hermesworld.ais.galapagos.devauth;
import javax.annotation.CheckReturnValue;
import java.io.OutputStream;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
/**
 * Service for managing per-developer authentications (certificates or API keys, depending on the
 * environment's authentication mode) on the configured Kafka clusters.
 */
public interface DeveloperAuthenticationService {

    /**
     * Returns the developer authentication of the currently logged-in user for the given environment, or an empty
     * Optional if no (valid) authentication exists, or the user or environment is unknown.
     *
     * @param environmentId ID of the environment (Kafka Cluster) to query.
     * @return An Optional with the authentication metadata, or an empty Optional.
     */
    Optional<DevAuthenticationMetadata> getDeveloperAuthenticationOfCurrentUser(String environmentId);

    /**
     * Creates (or replaces) the developer authentication of the currently logged-in user on the given environment.
     * The private part of the authentication (e.g. keystore bytes or the API key secret) is written to the given
     * output stream.
     *
     * @param environmentId         ID of the environment (Kafka Cluster) to create the authentication on.
     * @param outputStreamForSecret Stream receiving the secret part of the new authentication.
     * @return A future with the metadata of the newly created authentication, or completing exceptionally on failure.
     */
    @CheckReturnValue
    CompletableFuture<DevAuthenticationMetadata> createDeveloperAuthenticationForCurrentUser(String environmentId,
            OutputStream outputStreamForSecret);

    /**
     * Removes all developer authentications which have expired, on all configured Kafka clusters.
     *
     * @return A future with the total number of removed developer authentications.
     */
    @CheckReturnValue
    CompletableFuture<Integer> clearExpiredDeveloperAuthenticationsOnAllClusters();

    /**
     * Returns all known (and currently valid) developer authentications for the given Kafka Cluster.
     *
     * @param environmentId ID of the environment (Kafka Cluster) to return all valid developer authentications for.
     * @return A (possibly empty) list of developer authentications.
     */
    List<DevAuthenticationMetadata> getAllDeveloperAuthentications(String environmentId);
}
| 1,071 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperAuthenticationInfoDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/controller/DeveloperAuthenticationInfoDto.java | package com.hermesworld.ais.galapagos.devauth.controller;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonFormat.Shape;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import java.time.Instant;
/**
 * Immutable DTO describing a single developer authentication: its DN and its expiry timestamp
 * (serialized as an ISO-8601 string).
 */
@JsonSerialize
@Getter
public class DeveloperAuthenticationInfoDto {

    private final String dn;

    // Rendered as an ISO-8601 string instead of epoch seconds in the JSON output.
    @JsonFormat(shape = Shape.STRING)
    private final Instant expiresAt;

    public DeveloperAuthenticationInfoDto(String dn, Instant expiresAt) {
        this.expiresAt = expiresAt;
        this.dn = dn;
    }
}
| 600 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperAuthenticationController.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/controller/DeveloperAuthenticationController.java | package com.hermesworld.ais.galapagos.devauth.controller;
import com.hermesworld.ais.galapagos.applications.controller.AuthenticationDto;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ConfluentApiException;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthUtil;
import com.hermesworld.ais.galapagos.devauth.DevAuthenticationMetadata;
import com.hermesworld.ais.galapagos.devauth.DeveloperAuthenticationService;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONObject;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ResponseStatusException;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.ExecutionException;
/**
 * REST endpoints for managing the developer authentications (certificates / API keys) of the
 * currently logged-in user.
 */
@RestController
@Slf4j
public class DeveloperAuthenticationController {

    private final DeveloperAuthenticationService authService;

    private final CurrentUserService userService;

    private final KafkaClusters kafkaClusters;

    public DeveloperAuthenticationController(DeveloperAuthenticationService authService, CurrentUserService userService,
            KafkaClusters kafkaClusters) {
        this.authService = authService;
        this.userService = userService;
        this.kafkaClusters = kafkaClusters;
    }

    /**
     * Creates (or replaces) a developer certificate for the current user on the given environment and returns it as a
     * Base64 encoded PKCS#12 keystore.
     *
     * @param environmentId ID of the environment to create the certificate on.
     * @return DTO with suggested file name and Base64 encoded keystore contents.
     */
    @PostMapping(value = "/api/me/certificates/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public DeveloperCertificateDto createDeveloperCertificate(@PathVariable String environmentId) {
        String userName = userService.getCurrentUserName()
                .orElseThrow(() -> new ResponseStatusException(HttpStatus.UNAUTHORIZED));
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try {
            authService.createDeveloperAuthenticationForCurrentUser(environmentId, baos).get();
            return new DeveloperCertificateDto(userName + "_" + environmentId + ".p12",
                    Base64.getEncoder().encodeToString(baos.toByteArray()));
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e);
        }
        catch (InterruptedException e) {
            // Restore the interrupt flag; the null return results in an empty response body.
            Thread.currentThread().interrupt();
            return null;
        }
    }

    /**
     * Creates (or replaces) a developer API key for the current user on the given environment.
     *
     * @param environmentId ID of the environment to create the API key on.
     * @return DTO with API key, secret, and expiry timestamp.
     */
    @PostMapping(value = "/api/me/apikey/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public DeveloperApiKeyDto createDeveloperApiKey(@PathVariable String environmentId) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try {
            DevAuthenticationMetadata metadata = authService
                    .createDeveloperAuthenticationForCurrentUser(environmentId, baos).get();
            return new DeveloperApiKeyDto(ConfluentCloudAuthUtil.getApiKey(metadata.getAuthenticationJson()),
                    baos.toString(StandardCharsets.UTF_8),
                    ConfluentCloudAuthUtil.getExpiresAt(metadata.getAuthenticationJson()));
        }
        catch (ExecutionException e) {
            throw handleExecutionException(e);
        }
        catch (InterruptedException e) {
            // Restore the interrupt flag; the null return results in an empty response body.
            Thread.currentThread().interrupt();
            return null;
        }
    }

    /**
     * Returns the developer authentications of the current user, keyed by environment ID.
     *
     * @param environmentId Path variable retained for URL backward compatibility.
     *                      NOTE(review): it is no longer used for the lookup itself - confirm whether this endpoint
     *                      should instead be restricted to the given environment.
     * @return DTO with a map from environment ID to authentication info.
     */
    @GetMapping(value = "/api/me/authentications/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE)
    public DeveloperAuthenticationsDto getDeveloperAuthenticationInfo(@PathVariable String environmentId) {
        Map<String, AuthenticationDto> devAuthPerEnv = new HashMap<>();
        for (KafkaEnvironmentConfig env : kafkaClusters.getEnvironmentsMetadata()) {
            // FIX: query the authentication of the environment currently iterated (env.getId()) instead of always
            // using the path variable. Previously, the path variable environment's authentication was reported
            // (with potentially wrong authentication type) under EVERY environment's ID.
            DevAuthenticationMetadata metadata = authService.getDeveloperAuthenticationOfCurrentUser(env.getId())
                    .orElse(null);
            if (metadata != null && metadata.getAuthenticationJson() != null) {
                AuthenticationDto dto = new AuthenticationDto();
                dto.setAuthenticationType(env.getAuthenticationMode());
                dto.setAuthentication(new JSONObject(metadata.getAuthenticationJson()).toMap());
                devAuthPerEnv.put(env.getId(), dto);
            }
        }
        DeveloperAuthenticationsDto result = new DeveloperAuthenticationsDto();
        result.setAuthentications(devAuthPerEnv);
        return result;
    }

    /**
     * Maps the cause of an ExecutionException from the service layer to an appropriate HTTP status.
     */
    private ResponseStatusException handleExecutionException(ExecutionException e) {
        Throwable t = e.getCause();
        if (t instanceof CertificateException) {
            return new ResponseStatusException(HttpStatus.BAD_REQUEST, t.getMessage());
        }
        if (t instanceof ConfluentApiException) {
            return new ResponseStatusException(HttpStatus.BAD_GATEWAY, t.getMessage());
        }
        if (t instanceof NoSuchElementException) {
            return new ResponseStatusException(HttpStatus.NOT_FOUND);
        }
        if ((t instanceof IllegalStateException) || (t instanceof IllegalArgumentException)) {
            return new ResponseStatusException(HttpStatus.BAD_REQUEST, t.getMessage());
        }
        log.error("Unhandled exception in DeveloperAuthenticationController", t);
        return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
| 5,766 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperAuthenticationsDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/controller/DeveloperAuthenticationsDto.java | package com.hermesworld.ais.galapagos.devauth.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.applications.controller.AuthenticationDto;
import lombok.Getter;
import lombok.Setter;
import java.util.Map;
/**
 * DTO wrapping the developer authentications of a user, as a map from environment ID to
 * authentication info.
 */
@JsonSerialize
@Getter
@Setter
public class DeveloperAuthenticationsDto {

    // Key: environment ID; value: authentication type and details for that environment.
    private Map<String, AuthenticationDto> authentications;
}
| 409 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperApiKeyDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/controller/DeveloperApiKeyDto.java | package com.hermesworld.ais.galapagos.devauth.controller;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import lombok.Setter;
import java.time.Instant;
/**
 * Immutable DTO returned after creating a developer API key: the key, its secret, and its expiry
 * timestamp (serialized as an ISO-8601 string).
 */
// FIX: removed dead @Setter - Lombok does not generate setters for final fields, so the annotation
// only produced compiler warnings and never any code.
@JsonSerialize
@Getter
public class DeveloperApiKeyDto {

    private final String apiKey;

    private final String secret;

    // Rendered as an ISO-8601 string instead of epoch seconds in the JSON output.
    @JsonFormat(shape = JsonFormat.Shape.STRING)
    private final Instant expiresAt;

    public DeveloperApiKeyDto(String apiKey, String secret, Instant expiresAt) {
        this.apiKey = apiKey;
        this.secret = secret;
        this.expiresAt = expiresAt;
    }
}
| 654 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperCertificateDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/controller/DeveloperCertificateDto.java | package com.hermesworld.ais.galapagos.devauth.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
/**
 * Immutable DTO returned after creating a developer certificate: a suggested file name and the
 * Base64 encoded file contents (a PKCS#12 keystore, see the creating controller).
 */
@JsonSerialize
@Getter
public class DeveloperCertificateDto {

    private final String fileName;

    private final String fileContentsBase64;

    public DeveloperCertificateDto(String fileName, String fileContentsBase64) {
        this.fileContentsBase64 = fileContentsBase64;
        this.fileName = fileName;
    }
}
| 469 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DevUserAclListener.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/impl/DevUserAclListener.java | package com.hermesworld.ais.galapagos.devauth.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.devauth.DevAuthenticationMetadata;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.TimeService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.acl.AclBinding;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import javax.annotation.CheckReturnValue;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Event listener keeping the Kafka ACLs of <i>developer authentications</i> (personal certificates /
 * API keys) up to date. Whenever applications, subscriptions, or topics change in a way that affects
 * the rights of an application, the ACLs of all developers with an APPROVED owner request for that
 * application are recalculated.
 */
@Component
@Slf4j
public class DevUserAclListener implements TopicEventsListener, SubscriptionEventsListener, ApplicationEventsListener {

    private final ApplicationsService applicationsService;

    private final SubscriptionService subscriptionService;

    // Used to compare authentication expiry dates against "now".
    private final TimeService timeService;

    // Calculates and simplifies the ACL bindings required for an application.
    private final AclSupport aclSupport;

    private final KafkaClusters kafkaClusters;

    public DevUserAclListener(ApplicationsService applicationsService, SubscriptionService subscriptionService,
            TimeService timeService, AclSupport aclSupport, KafkaClusters kafkaClusters) {
        this.applicationsService = applicationsService;
        this.subscriptionService = subscriptionService;
        this.timeService = timeService;
        this.aclSupport = aclSupport;
        this.kafkaClusters = kafkaClusters;
    }

    // Refreshes the ACLs of all developers owning the newly registered application.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleApplicationRegistered(ApplicationEvent event) {
        KafkaCluster cluster = event.getContext().getKafkaCluster();
        String applicationId = event.getMetadata().getApplicationId();
        return updateAcls(cluster, getValidDevAuthenticationsForApplication(cluster, applicationId));
    }

    // Same handling as a registration: recalculate the owners' developer ACLs.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationAuthenticationChangeEvent event) {
        return handleApplicationRegistered(event);
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequestEvent event) {
        return handleApplicationOwnerRequestUpdated(event);
    }

    // Refreshes the ACLs of the single user whose owner request changed.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequestEvent event) {
        KafkaCluster cluster = event.getContext().getKafkaCluster();
        return updateAcls(cluster, getValidDevAuthenticationForUser(cluster, event.getRequest().getUserName()));
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequestEvent event) {
        return handleApplicationOwnerRequestUpdated(event);
    }

    // Refreshes the ACLs of all developers owning the subscribing (client) application.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleSubscriptionCreated(SubscriptionEvent event) {
        KafkaCluster cluster = event.getContext().getKafkaCluster();
        String applicationId = event.getMetadata().getClientApplicationId();
        return updateAcls(cluster, getValidDevAuthenticationsForApplication(cluster, applicationId));
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionEvent event) {
        return handleSubscriptionCreated(event);
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionEvent event) {
        return handleSubscriptionCreated(event);
    }

    // Recalculates developer ACLs for every application subscribing to the topic (including
    // non-approved subscriptions, see the "true" flag on getSubscriptionsForTopic).
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) {
        KafkaCluster cluster = event.getContext().getKafkaCluster();
        Set<String> applicationIds = subscriptionService
                .getSubscriptionsForTopic(cluster.getId(), event.getMetadata().getName(), true).stream()
                .map(s -> s.getClientApplicationId()).collect(Collectors.toSet());

        // Chain one ACL update per subscribing application (only for applications registered on this cluster).
        CompletableFuture<Void> result = FutureUtil.noop();
        for (String appId : applicationIds) {
            ApplicationMetadata appMeta = applicationsService.getApplicationMetadata(cluster.getId(), appId)
                    .orElse(null);
            if (appMeta != null) {
                result = result.thenCompose(
                        o -> updateAcls(cluster, getValidDevAuthenticationsForApplication(cluster, appId)));
            }
        }

        return result;
    }

    // Refreshes the ACLs of all developers owning the new producer application.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) {
        Set<DevAuthenticationMetadata> validDevCertificatesForApplication = new HashSet<>(
                getValidDevAuthenticationsForApplication(event.getContext().getKafkaCluster(),
                        event.getProducerApplicationId()));
        return updateAcls(event.getContext().getKafkaCluster(), validDevCertificatesForApplication);
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) {
        return updateAcls(event.getContext().getKafkaCluster(), getValidDevAuthenticationsForApplication(
                event.getContext().getKafkaCluster(), event.getProducerApplicationId()));
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) {
        return FutureUtil.noop();
    }

    // Topic creation affects the same set of developers as deletion (owner + subscribers), so the
    // deletion handler is reused here.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) {
        return handleTopicDeleted(event);
    }

    // Refreshes the ACLs of all developers of the owning application and of all (approved)
    // subscriber applications of the topic.
    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) {
        KafkaCluster cluster = event.getContext().getKafkaCluster();
        String applicationId = event.getMetadata().getOwnerApplicationId();
        Set<String> clientApplicationIds = subscriptionService
                .getSubscriptionsForTopic(cluster.getId(), event.getMetadata().getName(), false).stream()
                .map(s -> s.getClientApplicationId()).collect(Collectors.toSet());

        // Collect developer authentications of the owner and of all subscribers into one set.
        Set<DevAuthenticationMetadata> allAuthentications = Stream.of(Set.of(applicationId), clientApplicationIds)
                .flatMap(s -> s.stream()).flatMap(id -> getValidDevAuthenticationsForApplication(cluster, id).stream())
                .collect(Collectors.toSet());

        return allAuthentications.isEmpty() ? FutureUtil.noop() : updateAcls(cluster, allAuthentications);
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event) {
        return FutureUtil.noop();
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) {
        return FutureUtil.noop();
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) {
        return FutureUtil.noop();
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) {
        return FutureUtil.noop();
    }

    /**
     * Recalculates and applies the Kafka ACLs for the given developer authentications, one after the
     * other (the updates are chained sequentially via thenCompose).
     */
    @CheckReturnValue
    CompletableFuture<Void> updateAcls(KafkaCluster cluster, Set<DevAuthenticationMetadata> metadatas) {
        if (log.isDebugEnabled()) {
            log.debug("Updating ACLs for {} on cluster {}", metadatas.stream().map(m -> m.getUserName()).toList(),
                    cluster.getId());
        }
        CompletableFuture<Void> result = CompletableFuture.completedFuture(null);
        for (DevAuthenticationMetadata metadata : metadatas) {
            result = result.thenCompose(
                    o -> cluster.updateUserAcls(new DevAuthenticationKafkaUser(metadata, cluster.getId())));
        }

        return result;
    }

    /**
     * Removes the Kafka ACLs of the given developer authentications, one after the other (the
     * removals are chained sequentially via thenCompose).
     */
    @CheckReturnValue
    CompletableFuture<Void> removeAcls(KafkaCluster cluster, Set<DevAuthenticationMetadata> metadatas) {
        if (log.isDebugEnabled()) {
            log.debug("Removing ACLs for {} on cluster {}",
                    metadatas.stream().map(m -> m.getUserName()).collect(Collectors.toList()), cluster.getId());
        }
        CompletableFuture<Void> result = CompletableFuture.completedFuture(null);
        for (DevAuthenticationMetadata metadata : metadatas) {
            result = result.thenCompose(
                    o -> cluster.removeUserAcls(new DevAuthenticationKafkaUser(metadata, cluster.getId())));
        }

        return result;
    }

    // Returns the non-expired developer authentications of all users having an APPROVED owner
    // request for the given application.
    private Set<DevAuthenticationMetadata> getValidDevAuthenticationsForApplication(KafkaCluster cluster,
            String applicationId) {
        Set<String> userNames = applicationsService.getAllApplicationOwnerRequests().stream()
                .filter(req -> req.getState() == RequestState.APPROVED && applicationId.equals(req.getApplicationId()))
                .map(req -> req.getUserName()).collect(Collectors.toSet());

        return DeveloperAuthenticationServiceImpl.getRepository(cluster).getObjects().stream()
                .filter(dev -> isValid(dev, cluster) && userNames.contains(dev.getUserName()))
                .collect(Collectors.toSet());
    }

    // Returns the non-expired developer authentication of the given user on the given cluster,
    // as a set of zero or one elements.
    private Set<DevAuthenticationMetadata> getValidDevAuthenticationForUser(KafkaCluster cluster, String userName) {
        return DeveloperAuthenticationServiceImpl.getRepository(cluster).getObjects().stream()
                .filter(dev -> isValid(dev, cluster) && userName.equals(dev.getUserName())).collect(Collectors.toSet());
    }

    // An authentication is valid when the cluster has an authentication module, an expiry date can
    // be extracted from the authentication JSON, and that date lies in the future.
    private boolean isValid(DevAuthenticationMetadata metadata, KafkaCluster cluster) {
        JSONObject json = new JSONObject(metadata.getAuthenticationJson());
        KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(cluster.getId()).orElse(null);
        if (authModule == null) {
            return false;
        }
        return authModule.extractExpiryDate(json).isPresent()
                && authModule.extractExpiryDate(json).get().isAfter(timeService.getTimestamp().toInstant());
    }

    /**
     * Adapter exposing a developer authentication as a {@link KafkaUser}, so the cluster can
     * calculate and apply its ACLs.
     */
    private class DevAuthenticationKafkaUser implements KafkaUser {

        private final DevAuthenticationMetadata metadata;

        private final String environmentId;

        public DevAuthenticationKafkaUser(DevAuthenticationMetadata metadata, String environmentId) {
            this.metadata = metadata;
            this.environmentId = environmentId;
        }

        @Override
        public String getKafkaUserName() {
            // Extracts the Kafka-level user name from the stored authentication JSON; returns null
            // (and logs a warning) if the JSON cannot be parsed or no authentication module exists.
            JSONObject authData;
            try {
                authData = new JSONObject(metadata.getAuthenticationJson());
                return kafkaClusters.getAuthenticationModule(environmentId).map(m -> m.extractKafkaUserName(authData))
                        .orElse(null);
            }
            catch (JSONException e) {
                LoggerFactory.getLogger(DevUserAclListener.class).warn(
                        "Could not parse authentication JSON of developer authentication for user {}",
                        metadata.getUserName(), e);
                return null;
            }
        }

        @Override
        public Collection<AclBinding> getRequiredAclBindings() {
            // Developers get the combined (simplified) ACLs of every application they own; write
            // access only if the environment's developerWriteAccess flag is set.
            boolean writeAccess = kafkaClusters.getEnvironmentMetadata(environmentId)
                    .map(m -> m.isDeveloperWriteAccess()).orElse(false);
            return aclSupport.simplify(getApplicationsOfUser(metadata.getUserName(), environmentId).stream()
                    .map(a -> aclSupport.getRequiredAclBindings(environmentId, a, getKafkaUserName(), !writeAccess))
                    .flatMap(c -> c.stream()).collect(Collectors.toSet()));
        }

        // Metadata of all applications for which the user has an APPROVED owner request and which
        // are registered on the given environment.
        private Set<ApplicationMetadata> getApplicationsOfUser(String userName, String environmentId) {
            return applicationsService.getAllApplicationOwnerRequests().stream()
                    .filter(req -> req.getState() == RequestState.APPROVED && userName.equals(req.getUserName()))
                    .map(req -> applicationsService.getApplicationMetadata(environmentId, req.getApplicationId())
                            .orElse(null))
                    .filter(m -> m != null).collect(Collectors.toSet());
        }
    }
}
| 13,089 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperAuthenticationServiceImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/devauth/impl/DeveloperAuthenticationServiceImpl.java | package com.hermesworld.ais.galapagos.devauth.impl;
import com.hermesworld.ais.galapagos.devauth.DevAuthenticationMetadata;
import com.hermesworld.ais.galapagos.devauth.DeveloperAuthenticationService;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.util.InitPerCluster;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.TimeService;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONObject;
import org.springframework.stereotype.Component;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
@Component
@Slf4j
public class DeveloperAuthenticationServiceImpl implements DeveloperAuthenticationService, InitPerCluster {
private final KafkaClusters kafkaClusters;
private final CurrentUserService currentUserService;
private final DevUserAclListener aclUpdater;
private final TimeService timeService;
/**
 * Creates the service.
 *
 * @param kafkaClusters      Access to the configured Kafka environments and authentication modules.
 * @param currentUserService Provides the user name of the currently logged-in user.
 * @param aclUpdater         Listener used to apply / remove developer ACLs on the clusters.
 * @param timeService        Time source for expiry checks.
 */
public DeveloperAuthenticationServiceImpl(KafkaClusters kafkaClusters, CurrentUserService currentUserService,
        DevUserAclListener aclUpdater, TimeService timeService) {
    this.kafkaClusters = kafkaClusters;
    this.currentUserService = currentUserService;
    this.aclUpdater = aclUpdater;
    this.timeService = timeService;
}
@Override
public void init(KafkaCluster cluster) {
    // Touch the repository once so its topic-backed data gets loaded for this cluster; the
    // returned objects are intentionally discarded.
    getRepository(cluster).getObjects();
}
/**
 * Creates (or replaces) the developer authentication of the currently logged-in user on the given
 * environment. Any existing authentication of the user is removed first (including its ACLs), then
 * a new one is created via the environment's authentication module. The secret part of the new
 * authentication is written to the given output stream, the metadata is persisted, ACLs are
 * applied, and finally expired authentications on all clusters are cleaned up.
 */
@Override
public CompletableFuture<DevAuthenticationMetadata> createDeveloperAuthenticationForCurrentUser(
        String environmentId, OutputStream outputStreamForSecret) {
    String userName = currentUserService.getCurrentUserName().orElse(null);
    if (userName == null) {
        return FutureUtil.noUser();
    }

    KafkaCluster cluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
    if (cluster == null) {
        return FutureUtil.noSuchEnvironment(environmentId);
    }

    KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(environmentId).orElse(null);
    if (authModule == null) {
        return FutureUtil.noSuchEnvironment(environmentId);
    }

    TopicBasedRepository<DevAuthenticationMetadata> repository = getRepository(cluster);

    // If the user already has an authentication, first remove its ACLs, then delete it at the
    // authentication backend.
    CompletableFuture<Void> removeFuture = repository.getObject(userName)
            .map(oldMeta -> aclUpdater.removeAcls(cluster, Set.of(oldMeta)).thenCompose(o -> authModule
                    .deleteDeveloperAuthentication(userName, new JSONObject(oldMeta.getAuthenticationJson()))))
            .orElse(FutureUtil.noop());

    // Create the new authentication, persist its metadata, and stream out the secret part.
    return removeFuture.thenCompose(o -> authModule.createDeveloperAuthentication(userName, new JSONObject()))
            .thenCompose(result -> saveMetadata(cluster, userName, result).thenApply(meta -> {
                byte[] secretData = result.getPrivateAuthenticationData();
                if (secretData == null) {
                    // Treated as a failure further below (meta becomes null).
                    log.error("No secret data for developer authentication returned by generation");
                    return null;
                }
                try {
                    outputStreamForSecret.write(secretData);
                }
                catch (IOException e) {
                    // Best effort: metadata is still returned even if the stream could not be written.
                    log.warn("Could not write secret data of developer authentication to output stream", e);
                }
                return meta;
            }).thenCompose(meta -> meta == null
                    ? CompletableFuture.failedFuture(new NoSuchElementException("No authentication received"))
                    // Apply ACLs for the new authentication, then opportunistically clean up expired
                    // authentications on all clusters before returning the new metadata.
                    : aclUpdater.updateAcls(cluster, Set.of(meta))
                            .thenCompose(o -> clearExpiredDeveloperAuthenticationsOnAllClusters())
                            .thenApply(o -> meta)));
}
/**
 * Returns the developer authentication of the currently logged-in user for the given environment.
 * An empty Optional is returned when user, environment, or authentication module are unknown, when
 * no authentication is stored, or when the stored authentication has expired.
 */
@Override
public Optional<DevAuthenticationMetadata> getDeveloperAuthenticationOfCurrentUser(String environmentId) {
    Optional<String> user = currentUserService.getCurrentUserName();
    Optional<KafkaCluster> cluster = kafkaClusters.getEnvironment(environmentId);
    if (user.isEmpty() || cluster.isEmpty()) {
        return Optional.empty();
    }

    Optional<KafkaAuthenticationModule> authModule = kafkaClusters.getAuthenticationModule(environmentId);
    if (authModule.isEmpty()) {
        return Optional.empty();
    }

    // Expired authentications are treated as nonexistent.
    return getRepository(cluster.get()).getObject(user.get())
            .filter(metadata -> !isExpired(metadata, authModule.get()));
}
/**
 * Removes all expired developer authentications on all configured Kafka clusters: their ACLs are
 * removed, the authentication is deleted at the backend, and the metadata is removed from the
 * repository. The returned future yields the number of authentications scheduled for removal.
 */
@Override
public CompletableFuture<Integer> clearExpiredDeveloperAuthenticationsOnAllClusters() {
    CompletableFuture<Void> result = FutureUtil.noop();
    AtomicInteger totalClearedDevAuthentications = new AtomicInteger();
    for (KafkaCluster cluster : kafkaClusters.getEnvironments()) {
        KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(cluster.getId()).orElse(null);
        if (authModule == null) {
            // NOTE(review): this aborts the WHOLE operation (discarding deletions already chained for
            // previous clusters) if one cluster has no authentication module - confirm this is intended.
            return FutureUtil.noSuchEnvironment(cluster.getId());
        }
        Set<DevAuthenticationMetadata> expiredDevAuthentications = getRepository(cluster).getObjects().stream()
                .filter(devAuth -> isExpired(devAuth, authModule)).collect(Collectors.toSet());
        // Remove ACLs first, then delete each authentication at the backend and in the repository.
        result = result.thenCompose(o -> aclUpdater.removeAcls(cluster, expiredDevAuthentications));
        for (DevAuthenticationMetadata devAuth : expiredDevAuthentications) {
            JSONObject authJson = new JSONObject(devAuth.getAuthenticationJson());
            result = result
                    .thenCompose(o -> authModule.deleteDeveloperAuthentication(devAuth.getUserName(), authJson))
                    .thenCompose(o -> getRepository(cluster).delete(devAuth));
            // NOTE(review): counted while the future chain is being built, i.e. before the deletion
            // actually ran - failed deletions are still included in the returned total.
            totalClearedDevAuthentications.incrementAndGet();
        }
    }

    return result.thenApply(o -> totalClearedDevAuthentications.get());
}
@Override
public List<DevAuthenticationMetadata> getAllDeveloperAuthentications(String environmentId) {
KafkaCluster cluster = kafkaClusters.getEnvironment(environmentId).orElse(null);
if (cluster == null) {
return List.of();
}
KafkaAuthenticationModule authModule = kafkaClusters.getAuthenticationModule(cluster.getId()).orElseThrow();
return getRepository(cluster).getObjects().stream().filter(metadata -> !isExpired(metadata, authModule))
.collect(Collectors.toList());
}
private boolean isExpired(DevAuthenticationMetadata metadata, KafkaAuthenticationModule authModule) {
JSONObject authJson = new JSONObject(metadata.getAuthenticationJson());
return authModule.extractExpiryDate(authJson).map(dt -> dt.isBefore(timeService.getTimestamp().toInstant()))
.orElse(false);
}
private CompletableFuture<DevAuthenticationMetadata> saveMetadata(KafkaCluster cluster, String userName,
CreateAuthenticationResult result) {
DevAuthenticationMetadata metadata = toMetadata(userName, result);
return getRepository(cluster).save(toMetadata(userName, result)).thenApply(o -> metadata);
}
    /**
     * Returns the topic based repository storing developer authentication metadata ("devauth") for the given cluster.
     */
    static TopicBasedRepository<DevAuthenticationMetadata> getRepository(KafkaCluster cluster) {
        return cluster.getRepository("devauth", DevAuthenticationMetadata.class);
    }
private DevAuthenticationMetadata toMetadata(String userName, CreateAuthenticationResult result) {
DevAuthenticationMetadata metadata = new DevAuthenticationMetadata();
metadata.setUserName(userName);
metadata.setAuthenticationJson(result.getPublicAuthenticationData().toString());
return metadata;
}
}
| 8,515 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
NotificationParams.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/notifications/NotificationParams.java | package com.hermesworld.ais.galapagos.notifications;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Value object bundling the name of a notification template with the variables to be passed to the template engine
 * when rendering the notification.
 */
public final class NotificationParams {

    // both fields are never reassigned after construction, so they are declared final
    private final String templateName;

    private final Map<String, Object> variables = new HashMap<>();

    /**
     * Creates new notification parameters for the given template.
     *
     * @param templateName Name of the template to render for this notification.
     */
    public NotificationParams(String templateName) {
        this.templateName = templateName;
    }

    /**
     * Adds a variable to be passed to the template engine. An existing variable with the same name is replaced.
     */
    public void addVariable(String name, Object value) {
        variables.put(name, value);
    }

    public String getTemplateName() {
        return templateName;
    }

    /**
     * Returns an unmodifiable view of the variables added so far.
     */
    public Map<String, Object> getVariables() {
        return Collections.unmodifiableMap(variables);
    }
}
| 658 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
NotificationService.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/notifications/NotificationService.java | package com.hermesworld.ais.galapagos.notifications;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import java.util.concurrent.CompletableFuture;
/**
 * Interface of a component which is able to send notifications to defined sets of users. <br>
* Galapagos Components should not access the implementation of this interface directly, as all notifications are now
* sent out by {@link com.hermesworld.ais.galapagos.notifications.impl.NotificationEventListener}, which uses this
* component.
*/
public interface NotificationService {

    /**
     * Notifies the (active) subscribers of the given topic. Allows to exclude a single user from notification, usually
     * the one performing the change leading to this notification.
     *
     * @param environmentId ID of the Kafka environment of the topic.
     * @param topicName Name of the topic to load the subscriptions for.
     * @param notificationParams Content of the notification.
     * @param excludeUser User name to exclude from notification, or <code>null</code> to not exclude any user.
     * @return A CompletableFuture which completes when the notification e-mail has been sent, or which fails if the
     *         e-mail could not be sent for any reason.
     */
    CompletableFuture<Void> notifySubscribers(String environmentId, String topicName,
            NotificationParams notificationParams, String excludeUser);

    /**
     * Notifies the user who created the given application owner request, using the notification e-mail address stored
     * in the request.
     *
     * @param request The application owner request whose requestor shall be notified.
     * @param notificationParams Content of the notification.
     * @return A CompletableFuture which completes when the notification e-mail has been sent.
     */
    CompletableFuture<Void> notifyRequestor(ApplicationOwnerRequest request, NotificationParams notificationParams);

    /**
     * Notifies the configured Galapagos administrators.
     *
     * @param notificationParams Content of the notification.
     * @return A CompletableFuture which completes when the notification e-mail has been sent.
     */
    CompletableFuture<Void> notifyAdmins(NotificationParams notificationParams);

    /**
     * Notifies the owners of the given producer application, excluding the user with the given e-mail address
     * (usually the one performing the change leading to this notification).
     *
     * @param notificationParams Content of the notification.
     * @param currentUserEmail E-mail address to exclude from notification.
     * @param producerApplicationId ID of the producer application whose owners shall be notified.
     * @return A CompletableFuture which completes when the notification e-mail has been sent.
     */
    CompletableFuture<Void> notifyProducer(NotificationParams notificationParams, String currentUserEmail,
            String producerApplicationId);

    /**
     * Notifies the owners of the application owning the given topic.
     *
     * @param environmentId ID of the Kafka environment of the topic.
     * @param topicName Name of the topic whose owning application's owners shall be notified.
     * @param notificationParams Content of the notification.
     * @return A CompletableFuture which completes when the notification e-mail has been sent, or which fails if the
     *         topic is unknown on the given environment.
     */
    CompletableFuture<Void> notifyTopicOwners(String environmentId, String topicName,
            NotificationParams notificationParams);

    /**
     * Notifies the (approved) owners of the given application.
     *
     * @param applicationId ID of the application whose owners shall be notified.
     * @param notificationParams Content of the notification.
     * @return A CompletableFuture which completes when the notification e-mail has been sent.
     */
    CompletableFuture<Void> notifyApplicationTopicOwners(String applicationId, NotificationParams notificationParams);
}
| 2,055 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
NotificationServiceImpl.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/notifications/impl/NotificationServiceImpl.java | package com.hermesworld.ais.galapagos.notifications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.notifications.NotificationParams;
import com.hermesworld.ais.galapagos.notifications.NotificationService;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import jakarta.mail.MessagingException;
import jakarta.mail.internet.AddressException;
import jakarta.mail.internet.InternetAddress;
import jakarta.mail.internet.MimeMessage;
import lombok.extern.slf4j.Slf4j;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.task.TaskExecutor;
import org.springframework.mail.MailException;
import org.springframework.mail.javamail.JavaMailSender;
import org.springframework.mail.javamail.MimeMessageHelper;
import org.springframework.stereotype.Service;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.thymeleaf.ITemplateEngine;
import org.thymeleaf.context.Context;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
@Service
@Slf4j
public class NotificationServiceImpl implements NotificationService {

    private final SubscriptionService subscriptionService;

    private final ApplicationsService applicationsService;

    private final TopicService topicService;

    private final JavaMailSender mailSender;

    private final TaskExecutor taskExecutor;

    private final ITemplateEngine templateEngine;

    /** Sender address for all outgoing notification e-mails, parsed once at startup. */
    private final InternetAddress fromAddress;

    /** Recipients for administrative notifications, parsed once at startup. */
    private final List<InternetAddress> adminMailRecipients;

    public NotificationServiceImpl(SubscriptionService subscriptionService, ApplicationsService applicationsService,
            TopicService topicService, JavaMailSender mailSender, TaskExecutor taskExecutor,
            @Qualifier("emailTemplateEngine") ITemplateEngine templateEngine,
            @Value("${galapagos.mail.sender:Galapagos <me@privacy.net>}") String fromAddress,
            @Value("${galapagos.mail.admin-recipients:}") String adminMailRecipients) {
        this.subscriptionService = subscriptionService;
        this.applicationsService = applicationsService;
        this.topicService = topicService;
        this.mailSender = mailSender;
        this.taskExecutor = taskExecutor;
        this.templateEngine = templateEngine;
        // fail fast on misconfigured addresses - these are configuration errors, not runtime conditions
        try {
            this.fromAddress = InternetAddress.parse(fromAddress)[0];
        }
        catch (AddressException e) {
            throw new RuntimeException("Invalid e-mail address specified as Galapagos FROM address", e);
        }
        try {
            this.adminMailRecipients = toRecipientsList(adminMailRecipients);
        }
        catch (AddressException e) {
            throw new RuntimeException("Invalid e-mail address(es) specified as Galapagos admin e-mail recipients", e);
        }
    }

    @Override
    public CompletableFuture<Void> notifySubscribers(String environmentId, String topicName,
            NotificationParams notificationParams, String excludeUser) {
        List<SubscriptionMetadata> subscriptions = subscriptionService.getSubscriptionsForTopic(environmentId,
                topicName, false);
        Set<String> applicationIds = subscriptions.stream().map(SubscriptionMetadata::getClientApplicationId)
                .collect(Collectors.toSet());

        final String finalExcludeUser = excludeUser == null ? "" : excludeUser;

        // collect the e-mail addresses of all approved owners of all subscribed applications, except the excluded user
        Set<String> recipients = applicationsService.getAllApplicationOwnerRequests().stream()
                .filter(req -> req.getState() == RequestState.APPROVED)
                .filter(req -> applicationIds.contains(req.getApplicationId()))
                .filter(req -> !finalExcludeUser.equals(req.getUserName()))
                .map(ApplicationOwnerRequest::getNotificationEmailAddress).filter(StringUtils::hasLength)
                .collect(Collectors.toSet());

        // BCC to avoid exposing the recipients to each other
        return doSendAsync(notificationParams, safeToRecipientsList(recipients, true), true);
    }

    @Override
    public CompletableFuture<Void> notifyRequestor(ApplicationOwnerRequest request,
            NotificationParams notificationParams) {
        if (!StringUtils.hasLength(request.getNotificationEmailAddress())) {
            log.warn("Could not send e-mail to requestor: no e-mail address found in request " + request.getId());
            return CompletableFuture.completedFuture(null);
        }
        try {
            return doSendAsync(notificationParams,
                    Arrays.asList(InternetAddress.parse(request.getNotificationEmailAddress())), false);
        }
        catch (AddressException e) {
            log.error("Invalid e-mail address found in request, could not send notification e-mail to "
                    + request.getNotificationEmailAddress(), e);
            return CompletableFuture.failedFuture(e);
        }
    }

    @Override
    public CompletableFuture<Void> notifyAdmins(NotificationParams notificationParams) {
        return doSendAsync(notificationParams, this.adminMailRecipients, false);
    }

    @Override
    public CompletableFuture<Void> notifyProducer(NotificationParams notificationParams, String currentUserEmail,
            String producerApplicationId) {
        // null-safe comparisons here: notification e-mail addresses may be absent on owner requests (see
        // notifySubscribers, which filters on hasLength), so calling equals() on them directly could NPE
        Set<String> mailAddresses = applicationsService.getAllApplicationOwnerRequests().stream()
                .filter(ownerReq -> ownerReq.getState() == RequestState.APPROVED
                        && Objects.equals(ownerReq.getApplicationId(), producerApplicationId)
                        && !Objects.equals(ownerReq.getNotificationEmailAddress(), currentUserEmail))
                .map(ApplicationOwnerRequest::getNotificationEmailAddress).filter(StringUtils::hasLength)
                .collect(Collectors.toSet());

        return doSendAsync(notificationParams, safeToRecipientsList(mailAddresses, true), false);
    }

    @Override
    public CompletableFuture<Void> notifyTopicOwners(String environmentId, String topicName,
            NotificationParams notificationParams) {
        String ownerApplicationId = topicService.getTopic(environmentId, topicName).map(m -> m.getOwnerApplicationId())
                .orElse(null);
        if (ownerApplicationId == null) {
            return CompletableFuture.failedFuture(
                    new IllegalArgumentException("Unknown topic " + topicName + " on environment " + environmentId));
        }

        return notifyApplicationTopicOwners(ownerApplicationId, notificationParams);
    }

    @Override
    public CompletableFuture<Void> notifyApplicationTopicOwners(String applicationId,
            NotificationParams notificationParams) {
        List<String> mailAddresses = applicationsService.getAllApplicationOwnerRequests().stream()
                .filter(r -> r.getState() == RequestState.APPROVED && applicationId.equals(r.getApplicationId()))
                .map(ApplicationOwnerRequest::getNotificationEmailAddress).filter(StringUtils::hasLength).distinct()
                .collect(Collectors.toList());

        return doSendAsync(notificationParams, safeToRecipientsList(mailAddresses, true), true);
    }

    /**
     * Renders the given notification template and sends the resulting e-mail asynchronously via the configured
     * {@link TaskExecutor}.
     *
     * @param params Template name and variables for the notification.
     * @param recipients Recipients of the e-mail; if empty, no e-mail is sent.
     * @param bcc If <code>true</code>, recipients are set as BCC (hiding them from each other), otherwise as TO.
     * @return A future completing when the e-mail has been sent (or sending failed). NOTE(review): send failures are
     *         logged and the future still completes normally (best-effort), although the NotificationService javadoc
     *         for notifySubscribers states the future should fail - confirm which behavior is intended.
     */
    private CompletableFuture<Void> doSendAsync(NotificationParams params, List<InternetAddress> recipients,
            boolean bcc) {
        if (recipients.isEmpty()) {
            log.debug("No recipients specified, not sending e-mail with template " + params.getTemplateName());
            return CompletableFuture.completedFuture(null);
        }

        // render on the calling thread; only the actual sending is offloaded to the executor
        String[] data = processTemplate(params);
        String subject = data[0];
        String htmlBody = data[1];
        String plainBody = data[2];

        CompletableFuture<Void> future = new CompletableFuture<>();

        Runnable task = () -> {
            try {
                MimeMessage msg = mailSender.createMimeMessage();
                MimeMessageHelper helper = new MimeMessageHelper(msg, true);
                helper.setFrom(fromAddress);
                if (bcc) {
                    helper.setBcc(recipients.toArray(new InternetAddress[0]));
                }
                else {
                    helper.setTo(recipients.toArray(new InternetAddress[0]));
                }
                helper.setText(plainBody, htmlBody);
                helper.setSubject(subject);
                log.debug("Sending e-mail to " + recipients.size() + " recipient(s)...");
                mailSender.send(msg);
                log.debug("E-mail sent successfully.");
                future.complete(null);
            }
            catch (MessagingException | MailException e) {
                // deliberate best-effort: a failed notification must not fail the triggering operation
                log.error("Exception when sending notification e-mail", e);
                future.complete(null);
            }
        };

        taskExecutor.execute(task);
        return future;
    }

    /**
     * Processes the given notification template and extracts subject (HTML title), HTML body, and plain-text body.
     *
     * @return An array of [subject, HTML body, plain-text body].
     */
    private String[] processTemplate(NotificationParams params) {
        Context ctx = new Context();
        ctx.setVariables(params.getVariables());

        String htmlCode = this.templateEngine.process(params.getTemplateName(), ctx);
        Document doc = Jsoup.parse(htmlCode);
        String subject = doc.head().getElementsByTag("title").text();
        String plainText = doc.body().text();

        return new String[] { subject, htmlCode, plainText };
    }

    /**
     * Parses the given addresses into {@link InternetAddress} objects, skipping (and optionally logging) invalid ones
     * instead of failing.
     */
    private List<InternetAddress> safeToRecipientsList(Collection<String> recipients, boolean logWarnings) {
        List<InternetAddress> addresses = new ArrayList<>(recipients.size());
        for (String address : recipients) {
            try {
                addresses.addAll(Arrays.asList(InternetAddress.parse(address)));
            }
            catch (AddressException e) {
                if (logWarnings) {
                    log.warn("Invalid e-mail address found in request, cannot notify: " + address, e);
                }
            }
        }
        return addresses;
    }

    /**
     * Parses a (possibly empty) comma-separated address list into {@link InternetAddress} objects, failing on any
     * invalid address.
     */
    private static List<InternetAddress> toRecipientsList(String recipients) throws AddressException {
        if (ObjectUtils.isEmpty(recipients)) {
            return Collections.emptyList();
        }
        return Arrays.asList(InternetAddress.parse(recipients));
    }
}
| 10,724 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
NotificationEventListener.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/notifications/impl/NotificationEventListener.java | package com.hermesworld.ais.galapagos.notifications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.notifications.NotificationParams;
import com.hermesworld.ais.galapagos.notifications.NotificationService;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import jakarta.servlet.http.HttpServletRequest;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
/**
* This is the central component listening for all types of events in Galapagos and notifying the relevant parties. Here
* is the mapping logic of "what happened?" to "who should be notified?". For the real notification, the
* {@link NotificationService} is used.
*
* @author AlbrechtFlo
*/
@Component
@Slf4j
public class NotificationEventListener
        implements TopicEventsListener, SubscriptionEventsListener, ApplicationEventsListener, EventContextSource {

    private final NotificationService notificationService;

    private final ApplicationsService applicationsService;

    private final TopicService topicService;

    private final CurrentUserService userService;

    private final KafkaClusters kafkaClusters;

    // TODO externalize
    // fallback display values when a referenced application / user / environment cannot be resolved
    private final String unknownApp = "(unknown app)";

    private final String unknownUser = "(unknown user)";

    private final String unknownEnv = "(unknown environment)";

    // keys for values captured into the event context in getContextValues() and read back by the handlers,
    // which may run on a different thread than the triggering HTTP request
    private static final String HTTP_REQUEST_URL_KEY = NotificationEventListener.class.getName() + "_requestUrl";

    private static final String USER_NAME_KEY = NotificationEventListener.class.getName() + "_userName";

    private static final String IS_ADMIN_KEY = NotificationEventListener.class.getName() + "_isAdmin";

    public NotificationEventListener(NotificationService notificationService, ApplicationsService applicationsService,
            TopicService topicService, CurrentUserService userService, KafkaClusters kafkaClusters) {
        this.notificationService = notificationService;
        this.applicationsService = applicationsService;
        this.topicService = topicService;
        this.userService = userService;
        this.kafkaClusters = kafkaClusters;
    }

    /**
     * Notifies the owners of the subscribed topic when a new subscription requires their approval. Subscriptions
     * created in any other state than PENDING produce no notification.
     */
    @Override
    public CompletableFuture<Void> handleSubscriptionCreated(SubscriptionEvent event) {
        if (event.getMetadata().getState() == SubscriptionState.PENDING) {
            String topicName = event.getMetadata().getTopicName();
            String environmentId = event.getContext().getKafkaCluster().getId();

            String clientApplicationName = applicationsService
                    .getKnownApplication(event.getMetadata().getClientApplicationId()).map(KnownApplication::getName)
                    .orElse(unknownApp);
            String ownerApplicationName = topicService.getTopic(environmentId, topicName)
                    .flatMap(t -> applicationsService.getKnownApplication(t.getOwnerApplicationId()))
                    .map(KnownApplication::getName).orElse(unknownApp);

            NotificationParams params = new NotificationParams("approve_subscription");
            params.addVariable("topic_name", topicName);
            params.addVariable("client_application_name", clientApplicationName);
            params.addVariable("owner_application_name", ownerApplicationName);
            params.addVariable("subscription_description", event.getMetadata().getDescription());
            params.addVariable("galapagos_topic_url",
                    buildUIUrl(event, "/topics/" + topicName + "?environment=" + environmentId));

            return notificationService.notifyTopicOwners(environmentId, topicName, params);
        }

        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleSubscriptionDeleted(SubscriptionEvent event) {
        return FutureUtil.noop();
    }

    /**
     * Notifies the owners of the subscribing (client) application when their subscription's state changed, e.g. was
     * approved or rejected. The template name is derived from the new subscription state.
     */
    @Override
    public CompletableFuture<Void> handleSubscriptionUpdated(SubscriptionEvent event) {
        String topicName = event.getMetadata().getTopicName();
        String environmentId = event.getContext().getKafkaCluster().getId();
        String environmentName = kafkaClusters.getEnvironmentMetadata(environmentId)
                .map(KafkaEnvironmentConfig::getName).orElse(unknownEnv);
        String clientApplicationId = event.getMetadata().getClientApplicationId();

        String clientApplicationName = applicationsService
                .getKnownApplication(event.getMetadata().getClientApplicationId()).map(KnownApplication::getName)
                .orElse(unknownApp);
        String ownerApplicationName = topicService.getTopic(environmentId, topicName)
                .flatMap(t -> applicationsService.getKnownApplication(t.getOwnerApplicationId()))
                .map(KnownApplication::getName).orElse(unknownApp);

        // e.g. "subscription-approved", "subscription-rejected" - must match a mail template name
        NotificationParams params = new NotificationParams(
                "subscription-" + event.getMetadata().getState().name().toLowerCase(Locale.US));
        params.addVariable("topic_name", topicName);
        params.addVariable("env_name", environmentName);
        params.addVariable("client_application_name", clientApplicationName);
        params.addVariable("owner_application_name", ownerApplicationName);
        params.addVariable("galapagos_topic_url",
                buildUIUrl(event, "/topics/" + topicName + "?environment=" + environmentId));

        return notificationService.notifyApplicationTopicOwners(clientApplicationId, params);
    }

    @Override
    public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) {
        // only notify for production environment, to avoid N mails for N environments
        if (kafkaClusters.getProductionEnvironmentId().equals(event.getContext().getKafkaCluster().getId())) {
            return handleTopicChange(event, "als \"deprecated\" markiert");
        }

        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) {
        // only notify for production environment, to avoid N mails for N environments
        if (kafkaClusters.getProductionEnvironmentId().equals(event.getContext().getKafkaCluster().getId())) {
            return handleTopicChange(event, "die \"deprecated\"-Markierung entfernt");
        }

        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) {
        return handleTopicChange(event,
                "ein neues JSON-Schema veröffentlicht (" + event.getNewSchema().getChangeDescription() + ")");
    }

    @Override
    public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) {
        return handleTopicChange(event, "ein JSON-Schema wurde gelöscht ( )");
    }

    @Override
    public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) {
        return handleTopicProducerEvent("new-producer-added", event.getProducerApplicationId(), event);
    }

    @Override
    public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) {
        return handleTopicProducerEvent("producer-deleted", event.getProducerApplicationId(), event);
    }

    /**
     * Notifies the owners of the given producer application that it was added to or removed from a topic, excluding
     * the user who performed the change (identified by e-mail address).
     */
    private CompletableFuture<Void> handleTopicProducerEvent(String templateName, String producerApplicationId,
            TopicEvent event) {
        String environmentId = event.getContext().getKafkaCluster().getId();
        String environmentName = kafkaClusters.getEnvironmentMetadata(environmentId)
                .map(KafkaEnvironmentConfig::getName).orElse(unknownEnv);
        NotificationParams params = new NotificationParams(templateName);
        String currentUserEmail = userService.getCurrentUserEmailAddress().orElse(unknownUser);
        params.addVariable("topic_name", event.getMetadata().getName());
        params.addVariable("env_name", environmentName);
        Optional<KnownApplication> producerApp = applicationsService.getKnownApplication(producerApplicationId);
        params.addVariable("producer_application_name", producerApp.map(KnownApplication::getName).orElse(unknownApp));
        params.addVariable("galapagos_apps_url", buildUIUrl(event, "/applications"));
        return notificationService.notifyProducer(params, currentUserEmail, producerApplicationId);
    }

    @Override
    public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleApplicationRegistered(ApplicationEvent event) {
        return FutureUtil.noop();
    }

    @Override
    public CompletableFuture<Void> handleApplicationAuthenticationChanged(ApplicationAuthenticationChangeEvent event) {
        return FutureUtil.noop();
    }

    /**
     * Notifies the Galapagos administrators about a new application owner request. No notification is sent when the
     * request was created by an administrator (the admin flag is read from the event context).
     */
    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestCreated(ApplicationOwnerRequestEvent event) {
        NotificationParams params = new NotificationParams("new-appowner-request");
        params.addVariable("galapagos_admin_url", buildUIUrl(event, "/admin"));
        Optional<Boolean> isAdmin = event.getContext().getContextValue(IS_ADMIN_KEY).map(o -> (Boolean) o);
        if (isAdmin.orElse(false)) {
            return FutureUtil.noop();
        }
        return notificationService.notifyAdmins(params);
    }

    /**
     * Notifies the requestor of an application owner request about a state change (e.g. approved / rejected). No
     * notification is sent when the requestor updated the request themselves.
     */
    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestUpdated(ApplicationOwnerRequestEvent event) {
        RequestState newState = event.getRequest().getState();
        String userName = event.getContext().getContextValue(USER_NAME_KEY).map(Object::toString).orElse(unknownUser);
        String requestorUserName = event.getRequest().getUserName();
        if (userName.equals(requestorUserName)) {
            return FutureUtil.noop();
        }

        // e.g. "appowner-request-approved" - must match a mail template name
        NotificationParams params = new NotificationParams(
                "appowner-request-" + newState.toString().toLowerCase(Locale.US));
        params.addVariable("galapagos_apps_url", buildUIUrl(event, "/applications"));
        params.addVariable("user_name", requestorUserName);
        params.addVariable("updated_by", userName);
        Optional<KnownApplication> app = applicationsService.getKnownApplication(event.getRequest().getApplicationId());
        params.addVariable("app_name", app.map(KnownApplication::getName).orElse(unknownApp));
        return notificationService.notifyRequestor(event.getRequest(), params);
    }

    @Override
    public CompletableFuture<Void> handleApplicationOwnerRequestCanceled(ApplicationOwnerRequestEvent event) {
        return FutureUtil.noop();
    }

    /**
     * Notifies all subscribers of the changed topic, excluding the user who performed the change. The given change
     * text (German, see TODO below) is inserted into the "topic-changed" mail template.
     */
    private CompletableFuture<Void> handleTopicChange(TopicEvent event, String changeText) {
        String environmentId = event.getContext().getKafkaCluster().getId();
        String topicName = event.getMetadata().getName();
        String userName = event.getContext().getContextValue(USER_NAME_KEY).map(Object::toString).orElse(unknownUser);
        String environmentName = kafkaClusters.getEnvironmentMetadata(environmentId)
                .map(KafkaEnvironmentConfig::getName).orElse(unknownEnv);

        // TODO externalize strings
        NotificationParams params = new NotificationParams("topic-changed");
        params.addVariable("user_name", userName);
        params.addVariable("topic_name", topicName);
        params.addVariable("change_action_text", changeText);
        params.addVariable("galapagos_topic_url",
                buildUIUrl(event, "/topics/" + topicName + "?environment=" + environmentId));
        params.addVariable("environment_name", environmentName);

        return notificationService.notifySubscribers(environmentId, topicName, params, userName);
    }

    @Override
    public Map<String, Object> getContextValues() {
        // store the HttpRequest in the event context, as we may otherwise not be able to get it later (different
        // Thread)
        // same for current user name
        Map<String, Object> result = new HashMap<>();
        getCurrentHttpRequest().ifPresent(req -> result.put(HTTP_REQUEST_URL_KEY, req.getRequestURL().toString()));
        userService.getCurrentUserName().ifPresent(name -> result.put(USER_NAME_KEY, name));
        result.put(IS_ADMIN_KEY, userService.isAdmin());
        return result;
    }

    /**
     * Builds an absolute URL into the Galapagos UI, based on protocol, host, and port of the original HTTP request
     * stored in the event context. Returns "#" if no request URL is available or it cannot be parsed.
     */
    private String buildUIUrl(AbstractGalapagosEvent event, String uri) {
        Optional<String> opRequestUrl = event.getContext().getContextValue(HTTP_REQUEST_URL_KEY);

        if (opRequestUrl.isEmpty()) {
            return "#";
        }

        try {
            URL requestUrl = new URL(opRequestUrl.get());
            return new URL(requestUrl.getProtocol(), requestUrl.getHost(), requestUrl.getPort(),
                    "/app/" + (uri.startsWith("/") ? uri.substring(1) : uri)).toString();
        }
        catch (MalformedURLException e) {
            log.warn("Could not parse request URL from HTTP Request", e);
            return "#";
        }
    }

    /**
     * Returns the HTTP request of the current thread, if any (only available when called on a request thread).
     */
    private static Optional<HttpServletRequest> getCurrentHttpRequest() {
        return Optional.ofNullable(RequestContextHolder.getRequestAttributes()).filter(
                requestAttributes -> ServletRequestAttributes.class.isAssignableFrom(requestAttributes.getClass()))
                .map(requestAttributes -> ((ServletRequestAttributes) requestAttributes))
                .map(ServletRequestAttributes::getRequest);
    }
}
| 14,825 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
MailConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/notifications/impl/MailConfig.java | package com.hermesworld.ais.galapagos.notifications.impl;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.thymeleaf.TemplateEngine;
import org.thymeleaf.extras.java8time.dialect.Java8TimeDialect;
import org.thymeleaf.spring6.SpringTemplateEngine;
import org.thymeleaf.templatemode.TemplateMode;
import org.thymeleaf.templateresolver.ClassLoaderTemplateResolver;
import org.thymeleaf.templateresolver.ITemplateResolver;
@Configuration
public class MailConfig {

    /**
     * Provides the Thymeleaf template engine used for rendering notification e-mails. Templates are resolved from
     * <code>/mailtemplates/*.html</code> on the classpath, read as UTF-8, and not cached; the Java 8 Time dialect is
     * available in all templates.
     */
    @Bean
    public TemplateEngine emailTemplateEngine() {
        // classpath-based resolver for the HTML mail templates
        ClassLoaderTemplateResolver resolver = new ClassLoaderTemplateResolver();
        resolver.setOrder(1);
        resolver.setPrefix("/mailtemplates/");
        resolver.setSuffix(".html");
        resolver.setTemplateMode(TemplateMode.HTML);
        resolver.setCharacterEncoding("UTF-8");
        resolver.setCacheable(false);

        SpringTemplateEngine engine = new SpringTemplateEngine();
        engine.addDialect(new Java8TimeDialect());
        engine.addTemplateResolver(resolver);
        return engine;
    }
}
| 1,338 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ServiceAccountSpec.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/apiclient/ServiceAccountSpec.java | package com.hermesworld.ais.galapagos.ccloud.apiclient;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.PropertyNamingStrategies;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonNaming;
import lombok.Getter;
import lombok.Setter;
/**
 * DTO for a Confluent Cloud service account. Properties are deserialized from snake_case JSON; unknown JSON properties
 * are ignored.
 */
@Getter
@Setter
@JsonDeserialize
@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy.class)
@JsonIgnoreProperties(ignoreUnknown = true)
public class ServiceAccountSpec {

    /** The Confluent Cloud resource ID, usually something like sa-xy123 */
    private String resourceId;

    /** The "internal" numeric ID of the service account, currently required for direct ACL updates */
    private String numericId;

    // description of the service account, as reported by the Confluent Cloud API
    private String description;

    // display name of the service account, as reported by the Confluent Cloud API
    private String displayName;
}
| 839 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConfluentCloudApiClient.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/apiclient/ConfluentCloudApiClient.java | package com.hermesworld.ais.galapagos.ccloud.apiclient;
import com.fasterxml.jackson.core.JsonProcessingException;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.util.StringUtils;
import org.springframework.web.reactive.function.BodyInserters;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Mono;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* An API Client for accessing the Confluent Cloud REST API. That is, all relevant REST endpoints starting with
* <code>https://api.confluent.cloud/</code>, but e.g. not the REST endpoints specific to a cluster. <br>
* <br>
* <b>Important note:</b> As of Sep 2022, only "internal" numeric IDs of service accounts can be used for setting ACLs
* for service accounts via KafkaAdmin, although the REST API never reports these internal IDs. Confluent works on a
* solution to enable usage of the "resource ID" of each service account for ACLs, but that is not yet ready (Confluent
* internal ticket Kengine-24). <br>
* Confluent offers an "inofficial" API endpoint for retrieving the internal ID for service accounts as a workaround. If
* you set <code>idCompatMode</code> to <code>true</code>, this endpoint is additionally called for retrieving and the
* internal ID for each service account, otherwise, the field <code>numericId</code> of {@link ServiceAccountSpec}
* objects will always be <code>null</code>.
*/
@SuppressWarnings("JavadocLinkAsPlainText")
@Slf4j
public class ConfluentCloudApiClient {

    private static final String BASE_URL = "https://api.confluent.cloud";

    /** Base URL in effect for this instance. May differ from {@link #BASE_URL}, e.g. for tests against a mock. */
    private final String baseUrl;

    private final WebClient client;

    /** If <code>true</code>, "internal" numeric service account IDs are additionally retrieved (see class doc). */
    private final boolean idCompatMode;

    /**
     * Creates a new Confluent Cloud REST API Client for a given API endpoint, using a given API Key and secret.
     *
     * @param baseUrl      Base URL of the REST API to use, e.g. the URL of a mock server in integration tests.
     * @param apiKey       API Key to use for authentication.
     * @param apiSecret    Secret for the API Key for authentication.
     * @param idCompatMode If <code>true</code>, the "internal" numeric ID of service accounts is retrieved and used for
     *                     ACL related purposes. See description of this class for details.
     */
    public ConfluentCloudApiClient(String baseUrl, String apiKey, String apiSecret, boolean idCompatMode) {
        this.baseUrl = baseUrl;
        this.client = WebClient.builder().baseUrl(baseUrl)
                .defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
                .defaultHeader(HttpHeaders.AUTHORIZATION, buildAuth(apiKey, apiSecret)).build();
        this.idCompatMode = idCompatMode;
    }

    /**
     * Creates a new Confluent Cloud REST API Client for the official Confluent Cloud API endpoint.
     *
     * @param apiKey       API Key to use for authentication.
     * @param apiSecret    Secret for the API Key for authentication.
     * @param idCompatMode If <code>true</code>, the "internal" numeric ID of service accounts is retrieved and used for
     *                     ACL related purposes. See description of this class for details.
     */
    public ConfluentCloudApiClient(String apiKey, String apiSecret, boolean idCompatMode) {
        this(BASE_URL, apiKey, apiSecret, idCompatMode);
    }

    /**
     * Lists all API Keys associated with the given Kafka cluster.
     *
     * @param clusterId ID of the Kafka cluster, e.g. lkc-xy123.
     * @return A Mono providing the list of API Keys, or failing with a {@link ConfluentApiException}.
     */
    public Mono<List<ApiKeySpec>> listClusterApiKeys(String clusterId) {
        log.debug("List Cluster API Keys");
        return doPaginatedGet("/iam/v2/api-keys?spec.resource=" + clusterId, this::readApiKey,
                "Could not retrieve cluster API Key list");
    }

    /**
     * Lists all service accounts of the organization. Note that the <code>numericId</code> field of the returned
     * objects is <b>not</b> filled by this method.
     *
     * @return A Mono providing the list of service accounts, or failing with a {@link ConfluentApiException}.
     */
    public Mono<List<ServiceAccountSpec>> listServiceAccounts() {
        log.debug("List service accounts");
        return doPaginatedGet("/iam/v2/service-accounts", this::toServiceAccountSpec,
                "Could not retrieve service accounts");
    }

    /**
     * Creates a new service account in the organization.
     *
     * @param accountName        Display name for the new service account.
     * @param accountDescription Description for the new service account.
     * @return A Mono providing the created service account (with <code>numericId</code> filled if
     *         <code>idCompatMode</code> is enabled), or failing with a {@link ConfluentApiException}.
     */
    public Mono<ServiceAccountSpec> createServiceAccount(String accountName, String accountDescription) {
        log.debug("Create Service Account {}", accountName);
        JSONObject req = new JSONObject();
        req.put("display_name", accountName);
        req.put("description", accountDescription);
        return doPost("/iam/v2/service-accounts", req.toString(), this::toServiceAccountSpec,
                "Could not create service account").flatMap(this::perhapsAddInternalId);
    }

    /**
     * Creates a new API key for the given cluster, owned by the given service account.
     *
     * @param envId                    ID of the Confluent Cloud environment containing the cluster.
     * @param clusterId                ID of the Kafka cluster.
     * @param description              Description for the new API key.
     * @param serviceAccountResourceId Resource ID of the service account which shall own the key.
     * @return A Mono providing the created API key (including its secret), or failing with a
     *         {@link ConfluentApiException}.
     */
    public Mono<ApiKeySpec> createApiKey(String envId, String clusterId, String description,
            String serviceAccountResourceId) {
        log.debug("Create API Key {}", description);
        JSONObject spec = new JSONObject();
        spec.put("display_name", "");
        spec.put("description", description);
        spec.put("owner", Map.of("id", serviceAccountResourceId, "environment", envId));
        spec.put("resource", Map.of("id", clusterId, "environment", envId));
        String request = new JSONObject(Map.of("spec", spec)).toString();

        return doPost("/iam/v2/api-keys", request, this::readApiKey, "Could not create API Key");
    }

    /**
     * Deletes the given API key.
     *
     * @param apiKeySpec API key to delete (only its ID is used).
     * @return A Mono providing <code>true</code> on success, or failing with a {@link ConfluentApiException}.
     */
    public Mono<Boolean> deleteApiKey(ApiKeySpec apiKeySpec) {
        log.debug("Delete API Key {}", apiKeySpec.getId());
        return doDelete("/iam/v2/api-keys/" + apiKeySpec.getId(), "Could not delete API key").map(o -> true);
    }

    /**
     * Uses the unofficial endpoint https://api.confluent.cloud/service_accounts to retrieve the internal numeric IDs
     * for each Service Account in the current organization.
     *
     * @return A map from Confluent resource IDs (e.g. sa-xy123) to numeric service account IDs (e.g. 123456).
     */
    public Mono<Map<String, String>> getServiceAccountInternalIds() {
        log.debug("Get service account numeric IDs");
        return doDirectGet("/service_accounts", "Could not access or read /service_accounts workaround endpoint")
                .flatMap(this::toServiceAccountIdMap);
    }

    /**
     * Maps a single API key JSON object (as returned by the IAM v2 endpoints) to an {@link ApiKeySpec}. The secret is
     * only set when present and non-empty in the response.
     */
    private ApiKeySpec readApiKey(JSONObject obj) {
        JSONObject spec = obj.getJSONObject("spec");
        JSONObject metadata = obj.getJSONObject("metadata");
        ApiKeySpec result = new ApiKeySpec();
        result.setId(obj.getString("id"));
        result.setDescription(spec.getString("description"));
        result.setCreatedAt(Instant.parse(metadata.getString("created_at")));
        result.setSecret(
                spec.has("secret") && StringUtils.hasLength(spec.getString("secret")) ? spec.getString("secret")
                        : null);
        result.setServiceAccountId(spec.getJSONObject("owner").getString("id"));
        return result;
    }

    /**
     * Parses the response of the unofficial /service_accounts endpoint into a map from resource ID to numeric ID.
     * Non-service-account users in the response are skipped.
     */
    private Mono<Map<String, String>> toServiceAccountIdMap(String jsonResponse) {
        try {
            JSONObject obj = new JSONObject(jsonResponse);
            JSONArray users = obj.getJSONArray("users");
            Map<String, String> result = new HashMap<>();
            for (int i = 0; i < users.length(); i++) {
                JSONObject user = users.getJSONObject(i);
                if (user.optBoolean("service_account")) {
                    result.put(user.getString("resource_id"), user.get("id").toString());
                }
            }
            return Mono.just(result);
        }
        catch (JSONException e) {
            return Mono.error(e);
        }
    }

    /**
     * Fills the <code>numericId</code> field of the given service account if <code>idCompatMode</code> is enabled;
     * otherwise, returns the spec unchanged.
     */
    private Mono<ServiceAccountSpec> perhapsAddInternalId(ServiceAccountSpec spec) {
        if (idCompatMode) {
            return getServiceAccountInternalIds().map(idMap -> {
                spec.setNumericId(idMap.get(spec.getResourceId()));
                return spec;
            });
        }
        return Mono.just(spec);
    }

    /** Performs a POST request and maps the JSON response body via the given handler. */
    private <T> Mono<T> doPost(String uri, String body, SingleObjectMapper<T> responseBodyHandler,
            String errorMessage) {
        log.debug("Confluent API POST Request: uri = {}, body = {}", uri, body);
        return doMethod(WebClient::post, uri, body, responseBodyHandler, errorMessage);
    }

    /** Performs a plain GET request and returns the raw response body (no pagination handling). */
    @SuppressWarnings("SameParameterValue")
    private Mono<String> doDirectGet(String uri, String errorMessage) {
        log.debug("Confluent API GET Request: uri = {}", uri);
        return client.get().uri(uri).retrieve()
                .onStatus(status -> status.isError(), errorResponseHandler(uri, errorMessage)).bodyToMono(String.class);
    }

    /**
     * Performs a GET request against a paginated Confluent Cloud endpoint, following "next" links until all pages have
     * been read.
     *
     * @param uri                  Relative URI of the endpoint (may already contain query parameters).
     * @param dataElementExtractor Mapper from a single element of the "data" array to the result type.
     * @param errorMessage         Message prefix for exceptions raised on errors.
     * @return A Mono providing the elements of all pages, or failing with a {@link ConfluentApiException}.
     */
    private <T> Mono<List<T>> doPaginatedGet(String uri, SingleObjectMapper<T> dataElementExtractor,
            String errorMessage) {
        log.debug("Confluent API GET Request (using pagination): uri = {}", uri);
        String localUri = uri;
        // request a large page size on the first call; "next" links already carry their own pagination parameters
        if (!uri.contains("page_token")) {
            if (uri.contains("?")) {
                localUri = uri + "&page_size=100";
            }
            else {
                localUri = uri + "?page_size=100";
            }
        }

        // We perform our own parsing, as otherwise, Spring would expand %3D in the String (returned by "next" page of
        // Confluent Cloud API) to %253D.
        URI realUri;
        try {
            realUri = new URI(baseUrl).resolve(new URI(localUri));
        }
        catch (URISyntaxException e) {
            // deliberate best-effort behaviour: log and return an empty result instead of failing the chain
            log.error("Could not perform REST API request due to invalid URI {}", localUri, e);
            return Mono.just(List.of());
        }

        return client.get().uri(realUri).retrieve()
                .onStatus(status -> status.isError(), errorResponseHandler(uri, errorMessage)).bodyToMono(String.class)
                .flatMap(body -> {
                    try {
                        JSONObject objBody = new JSONObject(body);
                        if (!objBody.has("data")) {
                            return Mono.error(new ConfluentApiException(
                                    "Paginated endpoint " + uri + " did not return a data array"));
                        }
                        if (!objBody.has("metadata")) {
                            return Mono.error(new ConfluentApiException(
                                    "Paginated endpoint " + uri + " did not return pagination metadata"));
                        }
                        JSONObject metadata = objBody.getJSONObject("metadata");
                        JSONArray data = objBody.getJSONArray("data");

                        List<T> values = new ArrayList<>();
                        for (int i = 0; i < data.length(); i++) {
                            try {
                                values.add(dataElementExtractor.apply(data.getJSONObject(i)));
                            }
                            catch (JSONException | JsonProcessingException e) {
                                return Mono
                                        .error(new ConfluentApiException("Could not parse element of data array", e));
                            }
                        }

                        // isNull() guard: getString() would throw on an explicit JSON null "next" value
                        if (metadata.has("next") && !metadata.isNull("next")) {
                            // FIX: strip the *configured* base URL (not the BASE_URL constant), so pagination also
                            // works when this client was built against a non-default endpoint (custom baseUrl ctor).
                            String relativeUri = metadata.getString("next").replace(baseUrl, "");
                            return doPaginatedGet(relativeUri, dataElementExtractor, errorMessage).map(
                                    ls -> Stream.concat(values.stream(), ls.stream()).collect(Collectors.toList()));
                        }

                        return Mono.just(values);
                    }
                    catch (JSONException e) {
                        return Mono.error(new ConfluentApiException("Could not parse Confluent Cloud API response", e));
                    }
                });
    }

    /** Performs a DELETE request, discarding any response body. */
    @SuppressWarnings("SameParameterValue")
    private Mono<?> doDelete(String uri, String errorMessage) {
        return client.delete().uri(uri).retrieve()
                .onStatus(status -> status.isError(), errorResponseHandler(uri, errorMessage)).toBodilessEntity();
    }

    /** Performs a request with a JSON body (e.g. POST) and maps the JSON response body via the given handler. */
    private <T> Mono<T> doMethod(Function<WebClient, WebClient.RequestBodyUriSpec> method, String uri, String body,
            SingleObjectMapper<T> responseBodyHandler, String errorMessage) {
        return method.apply(client).uri(uri).body(BodyInserters.fromValue(body)).retrieve()
                .onStatus(status -> status.isError(), errorResponseHandler(uri, errorMessage)).bodyToMono(String.class)
                .map(response -> {
                    try {
                        return responseBodyHandler.apply(new JSONObject(response));
                    }
                    catch (JSONException | JsonProcessingException e) {
                        throw new ConfluentApiException("Could not parse Confluent Cloud API response", e);
                    }
                });
    }

    /**
     * Maps a single service account JSON object to a {@link ServiceAccountSpec}. The <code>numericId</code> field is
     * not filled here (see {@link #perhapsAddInternalId(ServiceAccountSpec)}).
     */
    private ServiceAccountSpec toServiceAccountSpec(JSONObject jsonResponse) throws JSONException {
        ServiceAccountSpec spec = new ServiceAccountSpec();
        spec.setDisplayName(jsonResponse.getString("display_name"));
        spec.setResourceId(jsonResponse.getString("id"));
        spec.setDescription(jsonResponse.getString("description"));
        return spec;
    }

    /**
     * Builds an error handler which extracts an error message from the JSON error body (either the "error" field or
     * the first element of an "errors" array), falling back to a generic message with the HTTP status code.
     */
    private Function<ClientResponse, Mono<? extends Throwable>> errorResponseHandler(String uri, String errorMessage) {
        return response -> response.bodyToMono(String.class).map(body -> {
            try {
                JSONObject result = new JSONObject(body);
                if (result.has("error") && !result.isNull("error")) {
                    return new ConfluentApiException(errorMessage + ": " + result.getString("error"));
                }
                if (result.has("errors") && result.getJSONArray("errors").length() > 0) {
                    return createApiExceptionFromErrors(errorMessage, result.getJSONArray("errors"));
                }
            }
            catch (JSONException e) {
                // then fallback to simple exception
            }
            return new ConfluentApiException(
                    errorMessage + ": Server returned " + response.statusCode().value() + " for " + uri);
        }).defaultIfEmpty(new ConfluentApiException(
                errorMessage + ": Server returned " + response.statusCode().value() + " for " + uri));
    }

    // currently, we only take the first error of the array, and use its "detail" as error message.
    // Maybe in future, we can add more details to the Exception object, if required.
    private ConfluentApiException createApiExceptionFromErrors(String errorMessage, JSONArray errors) {
        JSONObject error = errors.getJSONObject(0);
        String msg = error.getString("detail");
        return new ConfluentApiException(errorMessage + ": " + msg);
    }

    /** Builds the HTTP Basic Auth header value from API key and secret. */
    private static String buildAuth(String apiKey, String apiSecret) {
        return "Basic " + HttpHeaders.encodeBasicAuth(apiKey, apiSecret, StandardCharsets.UTF_8);
    }

    /** Like {@link Function}, but allowed to throw {@link JsonProcessingException} from the mapping. */
    @FunctionalInterface
    private interface SingleObjectMapper<T> {
        T apply(JSONObject obj) throws JsonProcessingException;
    }
}
| 14,827 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConfluentApiException.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/apiclient/ConfluentApiException.java | package com.hermesworld.ais.galapagos.ccloud.apiclient;
/**
 * Exception thrown when a call to the Confluent Cloud REST API fails, or when its response cannot be parsed.
 */
public class ConfluentApiException extends RuntimeException {

    /**
     * @param message Description of the failure.
     */
    public ConfluentApiException(String message) {
        super(message);
    }

    /**
     * @param message Description of the failure.
     * @param cause   Underlying cause, e.g. a JSON parsing error.
     */
    public ConfluentApiException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 310 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApiKeySpec.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/apiclient/ApiKeySpec.java | package com.hermesworld.ais.galapagos.ccloud.apiclient;
import lombok.Getter;
import lombok.Setter;
import java.time.Instant;
@Getter
@Setter
public class ApiKeySpec {

    /** ID of the API key. */
    private String id;

    /** Secret of the API key, or <code>null</code> if not contained in the API response. */
    private String secret;

    /** Description of the API key. */
    private String description;

    /** Creation timestamp of the API key. */
    private Instant createdAt;

    /** Resource ID of the service account owning this API key. */
    private String serviceAccountId;
}
| 329 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConfluentCloudAuthConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/auth/ConfluentCloudAuthConfig.java | package com.hermesworld.ais.galapagos.ccloud.auth;
import lombok.Getter;
import lombok.Setter;
import java.util.Objects;
@Getter
@Setter
public class ConfluentCloudAuthConfig {

    /** ID of the Confluent Cloud environment, e.g. env-xy123. */
    private String environmentId;

    /** ID of the Kafka cluster, e.g. lkc-xy123. */
    private String clusterId;

    private String clusterApiKey;

    private String clusterApiSecret;

    /** Validity duration for developer API keys; empty or unset disables developer API keys. */
    private String developerApiKeyValidity;

    private String organizationApiKey;

    private String organizationApiSecret;

    /** Nullable flag from configuration; resolved to a primitive by the accessor below. */
    private Boolean serviceAccountIdCompatMode;

    /**
     * @return <code>true</code> only if the compat mode flag has been explicitly enabled in the configuration.
     */
    public boolean isServiceAccountIdCompatMode() {
        // As Confluent Cloud now fully supports ResID-based ACLs, we do no longer have this to be the default
        return Boolean.TRUE.equals(serviceAccountIdCompatMode);
    }
}
| 747 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConfluentCloudAuthUtil.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/auth/ConfluentCloudAuthUtil.java | package com.hermesworld.ais.galapagos.ccloud.auth;
import org.json.JSONException;
import org.json.JSONObject;
import java.time.Instant;
import java.time.format.DateTimeParseException;
/**
 * Static helpers for reading fields from the authentication metadata JSON used by the
 * {@link ConfluentCloudAuthenticationModule}.
 */
public final class ConfluentCloudAuthUtil {

    // utility class, not meant to be instantiated
    private ConfluentCloudAuthUtil() {
    }

    /**
     * Extracts the API key from the given authentication metadata JSON.
     *
     * @param authJson Authentication metadata, as JSON string.
     * @return The API key, or <code>null</code> if the string is no valid JSON or does not contain an API key field.
     */
    public static String getApiKey(String authJson) {
        try {
            JSONObject authData = new JSONObject(authJson);
            return authData.getString(ConfluentCloudAuthenticationModule.JSON_API_KEY);
        }
        catch (JSONException e) {
            return null;
        }
    }

    /**
     * Extracts the expiry timestamp from the given authentication metadata JSON.
     *
     * @param authJson Authentication metadata, as JSON string.
     * @return The expiry instant, or <code>null</code> if the string is no valid JSON, contains no expiry field, or
     *         the field value cannot be parsed as an ISO instant.
     */
    public static Instant getExpiresAt(String authJson) {
        try {
            JSONObject authData = new JSONObject(authJson);
            String expiresAt = authData.getString(ConfluentCloudAuthenticationModule.JSON_EXPIRES_AT);
            return Instant.parse(expiresAt);
        }
        catch (DateTimeParseException | JSONException e) {
            return null;
        }
    }
}
| 864 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConfluentCloudAuthenticationModule.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/ccloud/auth/ConfluentCloudAuthenticationModule.java | package com.hermesworld.ais.galapagos.ccloud.auth;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ApiKeySpec;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ConfluentCloudApiClient;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ServiceAccountSpec;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.util.StringUtils;
import java.nio.charset.StandardCharsets;
import java.text.MessageFormat;
import java.time.Duration;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
@Slf4j
public class ConfluentCloudAuthenticationModule implements KafkaAuthenticationModule {

    // MessageFormat patterns for the "description" field of service accounts. The description is used to look up
    // existing service accounts for an application / developer (see findServiceAccountForApp / findServiceAccountForDev).
    private final static String APP_SERVICE_ACCOUNT_DESC = "APP_{0}";

    private final static String DEVELOPER_SERVICE_ACCOUNT_DESC = "DEV_{0}";

    // MessageFormat patterns for the "description" field of created API keys
    private final static String API_KEY_DESC = "Application {0}";

    private final static String API_DEVELOPER_KEY_DESC = "Developer {0}";

    // keys used in the authentication metadata JSON objects created and consumed by this module
    final static String JSON_API_KEY = "apiKey";

    private final static String JSON_ISSUED_AT = "issuedAt";

    private final static String JSON_USER_ID = "userId";

    final static String JSON_EXPIRES_AT = "expiresAt";

    private final static String JSON_NUMERIC_ID = "numericId";

    // matches "internal" numeric service account IDs (in contrast to resource IDs like sa-xy123)
    private final static String NUMERIC_ID_PATTERN = "\\d{5,7}";

    private final ConfluentCloudApiClient client;

    private final ConfluentCloudAuthConfig config;

    // cache for the service account resource ID -> numeric ID mapping; cleared whenever a new service account
    // may be created, to force a fresh retrieval on next use
    private final Map<String, String> serviceAccountNumericIds = new ConcurrentHashMap<>();

    /**
     * Creates the authentication module for a Confluent Cloud based Kafka cluster.
     *
     * @param config Configuration, including organization API key / secret and cluster coordinates.
     * @throws IllegalArgumentException If the configured developer API key validity cannot be parsed as a Duration.
     */
    public ConfluentCloudAuthenticationModule(ConfluentCloudAuthConfig config) {
        this.client = new ConfluentCloudApiClient(config.getOrganizationApiKey(), config.getOrganizationApiSecret(),
                config.isServiceAccountIdCompatMode());
        this.config = config;
        // fail fast on an invalid validity configuration instead of failing on first developer key creation
        try {
            this.getDeveloperApiKeyValidity();
        }
        catch (DateTimeParseException e) {
            throw new IllegalArgumentException(
                    "Invalid date for developer API key validity: " + config.getDeveloperApiKeyValidity(), e);
        }
    }

    @Override
    public CompletableFuture<Void> init() {
        // pre-fill the numeric ID cache in compat mode, so later synchronous lookups can be avoided
        if (config.isServiceAccountIdCompatMode()) {
            return getServiceAccountNumericIds().thenApply(o -> null);
        }
        return FutureUtil.noop();
    }

    /**
     * Adds the SASL/PLAIN + SSL properties required to connect to the cluster with the configured cluster API key.
     */
    @Override
    public void addRequiredKafkaProperties(Properties kafkaProperties) {
        kafkaProperties.setProperty("sasl.mechanism", "PLAIN");
        kafkaProperties.setProperty("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required username='"
                        + config.getClusterApiKey() + "' password='" + config.getClusterApiSecret() + "';");
        kafkaProperties.setProperty("security.protocol", "SASL_SSL");
    }

    /**
     * Creates an API key for the given application, creating an associated service account first if none exists yet
     * (existing accounts are found via their description, see APP_SERVICE_ACCOUNT_DESC).
     */
    @Override
    public CompletableFuture<CreateAuthenticationResult> createApplicationAuthentication(String applicationId,
            String applicationNormalizedName, JSONObject createParams) {
        String apiKeyDesc = MessageFormat.format(API_KEY_DESC, applicationNormalizedName);
        log.info("Creating API Key for application (normalized name) {}", applicationNormalizedName);

        // reset internal ID cache
        serviceAccountNumericIds.clear();

        // limit account name to 50 characters (presumably a Confluent Cloud name length limit - TODO confirm)
        String shortenedAppName = applicationNormalizedName.substring(0,
                Math.min(50, applicationNormalizedName.length()));

        return findServiceAccountForApp(applicationId)
                .thenCompose(account -> account.map(a -> CompletableFuture.completedFuture(a))
                        .orElseGet(() -> client.createServiceAccount("application-" + shortenedAppName,
                                appServiceAccountDescription(applicationId)).toFuture()))
                .thenCompose(account -> client.createApiKey(config.getEnvironmentId(), config.getClusterId(),
                        apiKeyDesc, account.getResourceId()).toFuture())
                .thenCompose(keyInfo -> toCreateAuthResult(keyInfo, null));
    }

    /**
     * Replaces the existing API key of the application: deletes the old key, then creates a new one.
     */
    @Override
    public CompletableFuture<CreateAuthenticationResult> updateApplicationAuthentication(String applicationId,
            String applicationNormalizedName, JSONObject createParameters, JSONObject existingAuthData) {
        return deleteApplicationAuthentication(applicationId, existingAuthData).thenCompose(
                o -> createApplicationAuthentication(applicationId, applicationNormalizedName, createParameters));
    }

    /**
     * Deletes the API key referenced in the given metadata, if present. The associated service account is kept. A
     * key no longer known to the cluster is silently ignored.
     */
    @Override
    public CompletableFuture<Void> deleteApplicationAuthentication(String applicationId, JSONObject existingAuthData) {
        String apiKey = existingAuthData.optString(JSON_API_KEY);
        if (StringUtils.hasLength(apiKey)) {
            log.info("Deleting API Key {}", apiKey);
            return client.listClusterApiKeys(config.getClusterId()).toFuture()
                    .thenCompose(ls -> ls.stream().filter(info -> apiKey.equals(info.getId())).findAny()
                            .map(info -> client.deleteApiKey(info).toFuture().thenApply(o -> (Void) null))
                            .orElse(FutureUtil.noop()));
        }
        return FutureUtil.noop();
    }

    /**
     * Extracts the Kafka user name (as used in ACLs) from the given authentication metadata, in the form
     * <code>User:&lt;id&gt;</code>. In compat mode, tries to resolve the "internal" numeric service account ID.
     *
     * @throws JSONException If no user ID is set in the metadata.
     */
    @Override
    public String extractKafkaUserName(JSONObject existingAuthData) throws JSONException {
        String userId = existingAuthData.optString(JSON_USER_ID);
        if (!StringUtils.hasLength(userId)) {
            throw new JSONException("No userId set in application authentication data");
        }

        // special treatment for Confluent resourceId <-> numericId problem
        if (config.isServiceAccountIdCompatMode()) {
            // case 1: already stored (Created by Galapagos 2.6.0 or later, or executed admin job
            // update-confluent-auth-metadata)
            String numericId = existingAuthData.optString(JSON_NUMERIC_ID);
            if (StringUtils.hasLength(numericId)) {
                return "User:" + numericId;
            }

            // case 2: stored user ID is numeric
            if (userId.matches(NUMERIC_ID_PATTERN)) {
                return "User:" + userId;
            }

            // worst case: lookup ID synchronous (bah!)
            try {
                Map<String, String> numericIdMap = getServiceAccountNumericIds().get();
                if (numericIdMap.containsKey(userId)) {
                    return "User:" + numericIdMap.get(userId);
                }
                log.warn(
                        "Could not determine internal ID for service account {}, will return most likely invalid Kafka user name",
                        userId);
                return "User:" + userId;
            }
            catch (InterruptedException e) {
                // restore interrupt flag; falls through to the resource ID based fallback below
                Thread.currentThread().interrupt();
            }
            catch (ExecutionException e) {
                log.error(
                        "Could not retrieve service account internal ID map, will return most likely invalid Kafka user name",
                        e);
                return "User:" + userId;
            }
        }

        return "User:" + userId;
    }

    /**
     * Creates a time-limited API key for the given developer, creating an associated service account first if none
     * exists yet. Fails if no developer API key validity is configured for this environment.
     */
    @Override
    public CompletableFuture<CreateAuthenticationResult> createDeveloperAuthentication(String userName,
            JSONObject createParams) {
        if (!supportsDeveloperApiKeys()) {
            return CompletableFuture
                    .failedFuture(new IllegalStateException("Developer API Keys not enabled on this Environment"));
        }

        String apiKeyDesc = MessageFormat.format(API_DEVELOPER_KEY_DESC, userName);
        Duration validity = getDeveloperApiKeyValidity().orElseThrow();
        Instant expiresAt = Instant.now().plus(validity);
        // use only the local part of an e-mail style user name for the service account name
        String finalUserName = userName.split("@")[0];

        // reset internal ID cache
        serviceAccountNumericIds.clear();

        return findServiceAccountForDev(userName)
                .thenCompose(
                        account -> account.map(a -> CompletableFuture.completedFuture(a))
                                .orElseGet(() -> client.createServiceAccount("developer-" + finalUserName,
                                        devServiceAccountDescription(userName)).toFuture())
                                .thenCompose(acc -> client.createApiKey(config.getEnvironmentId(),
                                        config.getClusterId(), apiKeyDesc, acc.getResourceId()).toFuture())
                                .thenCompose(keyInfo -> toCreateAuthResult(keyInfo, expiresAt)));
    }

    @Override
    public CompletableFuture<Void> deleteDeveloperAuthentication(String userName, JSONObject existingAuthData) {
        // deletion only needs the API key from the metadata, so the application-oriented logic can be reused
        return deleteApplicationAuthentication(null, existingAuthData);
    }

    /**
     * @return The expiry instant stored in the given metadata, or empty if none is stored (e.g. application keys).
     */
    @Override
    public Optional<Instant> extractExpiryDate(JSONObject authData) {
        if (authData.has(JSON_EXPIRES_AT)) {
            return Optional.of(Instant.from(DateTimeFormatter.ISO_INSTANT.parse(authData.getString(JSON_EXPIRES_AT))));
        }
        return Optional.empty();
    }

    /**
     * @return <code>true</code> if a developer API key validity duration is configured for this environment.
     */
    public boolean supportsDeveloperApiKeys() {
        return getDeveloperApiKeyValidity().isPresent();
    }

    /**
     * Upgrades a given "old" authentication metadata object. Is used by admin job "update-confluent-auth-metadata" to
     * e.g. add numeric IDs and Service Account Resource IDs.
     *
     * @param oldAuthMetadata Potentially "old" authentication metadata (function determines if update is required).
     * @return The "updated" metadata, or the unchanged metadata if already filled with new fields. As a future as an
     *         API call may be required to determine required information (e.g. internal numeric ID for a service
     *         account). The future can fail if the API call fails.
     */
    public CompletableFuture<JSONObject> upgradeAuthMetadata(JSONObject oldAuthMetadata) {
        String userId = oldAuthMetadata.optString(JSON_USER_ID);
        if (!StringUtils.hasLength(userId)) {
            return CompletableFuture.completedFuture(oldAuthMetadata);
        }

        // numeric user ID means we have to retrieve service account ID
        if (userId.matches(NUMERIC_ID_PATTERN)) {
            return getServiceAccountNumericIds().thenApply(map -> {
                // reverse lookup: find the resource ID whose numeric ID equals the stored user ID
                String resourceId = map.entrySet().stream().filter(e -> userId.equals(e.getValue())).findAny()
                        .map(e -> e.getKey()).orElse(null);
                if (resourceId == null) {
                    log.warn("Unable to determine Service Account resource ID for numeric ID {}", userId);
                    return oldAuthMetadata;
                }
                // copy the metadata instead of mutating the caller's object
                JSONObject newMetadata = new JSONObject(oldAuthMetadata.toString());
                newMetadata.put(JSON_USER_ID, resourceId);
                newMetadata.put(JSON_NUMERIC_ID, userId);
                return newMetadata;
            });
        }

        // for now, no other scenarios for upgrading are supported
        return CompletableFuture.completedFuture(oldAuthMetadata);
    }

    /**
     * Returns the (cached) mapping from service account resource IDs to numeric IDs, retrieving it from the API on
     * first use or after the cache has been cleared.
     */
    private CompletableFuture<Map<String, String>> getServiceAccountNumericIds() {
        if (this.serviceAccountNumericIds.isEmpty()) {
            return client.getServiceAccountInternalIds().map(map -> {
                this.serviceAccountNumericIds.putAll(map);
                return map;
            }).toFuture();
        }
        return CompletableFuture.completedFuture(serviceAccountNumericIds);
    }

    /** Finds the service account for the given application, identified via its description field. */
    private CompletableFuture<Optional<ServiceAccountSpec>> findServiceAccountForApp(String applicationId) {
        String desc = appServiceAccountDescription(applicationId);
        return client.listServiceAccounts().toFuture()
                .thenApply(ls -> ls.stream().filter(acc -> desc.equals(acc.getDescription())).findAny());
    }

    /** Finds the service account for the given developer, identified via its description field. */
    private CompletableFuture<Optional<ServiceAccountSpec>> findServiceAccountForDev(String userName) {
        String desc = devServiceAccountDescription(userName);
        return client.listServiceAccounts().toFuture()
                .thenApply(ls -> ls.stream().filter(acc -> desc.equals(acc.getDescription())).findAny());
    }

    private String appServiceAccountDescription(String applicationId) {
        return MessageFormat.format(APP_SERVICE_ACCOUNT_DESC, applicationId);
    }

    private String devServiceAccountDescription(String userName) {
        return MessageFormat.format(DEVELOPER_SERVICE_ACCOUNT_DESC, userName);
    }

    /**
     * Builds the authentication metadata JSON and result object for a freshly created API key.
     *
     * @param keyInfo   The created API key. NOTE(review): the secret is assumed to be filled for freshly created keys
     *                  (getSecret() would otherwise cause an NPE here) - confirm against API behaviour.
     * @param expiresAt Optional expiry timestamp (used for developer keys); may be null.
     */
    private CompletableFuture<CreateAuthenticationResult> toCreateAuthResult(ApiKeySpec keyInfo, Instant expiresAt) {
        JSONObject info = new JSONObject();
        info.put(JSON_API_KEY, keyInfo.getId());
        info.put(JSON_USER_ID, String.valueOf(keyInfo.getServiceAccountId()));
        info.put(JSON_ISSUED_AT, keyInfo.getCreatedAt().toString());
        if (expiresAt != null) {
            info.put(JSON_EXPIRES_AT, expiresAt.toString());
        }
        if (config.isServiceAccountIdCompatMode()) {
            // also store the numeric ID, so extractKafkaUserName does not need a synchronous lookup later
            return getServiceAccountNumericIds().thenApply(map -> {
                info.put(JSON_NUMERIC_ID, map.get(keyInfo.getServiceAccountId()));
                return new CreateAuthenticationResult(info, keyInfo.getSecret().getBytes(StandardCharsets.UTF_8));
            });
        }
        return CompletableFuture.completedFuture(
                new CreateAuthenticationResult(info, keyInfo.getSecret().getBytes(StandardCharsets.UTF_8)));
    }

    /**
     * @return The configured validity duration for developer API keys, or empty if none is configured.
     * @throws DateTimeParseException If the configured value is no valid Duration.
     */
    private Optional<Duration> getDeveloperApiKeyValidity() {
        if (!StringUtils.hasLength(config.getDeveloperApiKeyValidity())) {
            return Optional.empty();
        }
        return Optional.of(Duration.parse(this.config.getDeveloperApiKeyValidity()));
    }
}
| 14,128 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
StagingResult.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/staging/StagingResult.java | package com.hermesworld.ais.galapagos.staging;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.changes.Change;
import lombok.Getter;
@JsonSerialize
@Getter
/**
 * Immutable result of applying a single {@link Change} during a staging run.
 */
public final class StagingResult {

    /** The change which was (attempted to be) applied. */
    private final Change change;

    /** Whether applying the change succeeded. */
    private final boolean stagingSuccessful;

    /** Error message if the change could not be applied, otherwise <code>null</code>. */
    private final String errorMessage;

    /**
     * @param change            The change which was (attempted to be) applied.
     * @param stagingSuccessful Whether applying the change succeeded.
     * @param errorMessage      Error message if the change could not be applied, otherwise <code>null</code>.
     */
    public StagingResult(Change change, boolean stagingSuccessful, String errorMessage) {
        this.change = change;
        this.stagingSuccessful = stagingSuccessful;
        this.errorMessage = errorMessage;
    }
}
| 572 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
Staging.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/staging/Staging.java | package com.hermesworld.ais.galapagos.staging;
import java.util.List;
import com.hermesworld.ais.galapagos.changes.Change;
/**
 * A prepared staging for a single application: the set of changes which would be applied to the target environment to
 * bring it up-to-date with the source environment. Call {@link #perform()} to actually apply the changes.
 */
public interface Staging {

    /**
     * @return ID of the application this staging has been calculated for.
     */
    String getApplicationId();

    /**
     * @return ID of the environment serving as the source of the staging.
     */
    String getSourceEnvironmentId();

    /**
     * @return ID of the environment the changes apply to.
     */
    String getTargetEnvironmentId();

    /**
     * @return The calculated changes which would be performed on the target environment.
     */
    List<Change> getChanges();

    /**
     * Performs the staging, i.e. applies the calculated changes.
     *
     * @return One {@link StagingResult} per change, indicating success or failure of each change.
     */
    List<StagingResult> perform();
}
| 367 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
StagingService.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/staging/StagingService.java | package com.hermesworld.ais.galapagos.staging;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import com.hermesworld.ais.galapagos.changes.Change;
/**
 * This service can be used to calculate a {@link Staging} object, which describes the required changes to be performed
 * on an environment to bring it up-to-date with the given source environment, for a given application.
 *
 * @author AlbrechtFlo
 *
 */
public interface StagingService {

    /**
     * "Prepares" the staging for a given application and a given source environment. This means,
     * <ul>
     * <li>determining the appropriate target environment,</li>
     * <li>calculating the differences between given source and the target environment, for the given application.</li>
     * </ul>
     * If the optional <code>changesFilter</code> is provided and not empty, the result is filtered, and only changes
     * equal to one of the changes in the given filter list are included. Note that this may lead to an inconsistent
     * change set, which may not fully be applied to the target environment successfully. <br>
     * <br>
     * Note that this method does not apply any changes. To perform the staging, call <code>perform()</code> on the
     * (asynchronously) returned <code>Staging</code> object.
     *
     * @param applicationId     ID of the application to calculate the Staging object for.
     * @param environmentIdFrom Source Environment ID
     * @param changesFilter     Optional filter to reduce the found changes to. An empty filter includes all changes.
     *
     * @return A completable future providing the <code>Staging</code> object when done, or failing if the staging
     *         cannot be calculated for whatever reason.
     */
    CompletableFuture<Staging> prepareStaging(String applicationId, String environmentIdFrom,
            List<Change> changesFilter);
}
| 1,804 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.