From 142eedf3195be2e2eabcf7b0ec89594ab60f3192 Mon Sep 17 00:00:00 2001 From: Aleksandar Stanchev Date: Sun, 15 Feb 2026 17:00:42 +0200 Subject: [PATCH 1/5] Add dual-key encryption support for key rotation Implement dual-key configuration and fallback decryption to enable safe encryption key rotation without downtime or data loss. Signed-off-by: Aleksandar Stanchev --- .../config/DefaultFieldsEncryptionConfig.java | 32 +++++- .../config/FieldsEncryptionConfig.java | 34 +++++- .../ConnectionMongoSnapshotAdapter.java | 2 +- .../ConnectivityMongoEventAdapter.java | 3 +- .../persistence/JsonFieldsEncryptor.java | 105 ++++++++++++++---- .../src/main/resources/connectivity.conf | 16 ++- .../persistence/JsonFieldsEncryptorTest.java | 75 +++++++++++++ ...ction-fields-encryption-dual-key-test.conf | 61 ++++++++++ 8 files changed, 299 insertions(+), 29 deletions(-) create mode 100644 connectivity/service/src/test/resources/connection-fields-encryption-dual-key-test.conf diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java index 57153b2e231..9ee8722dd47 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java @@ -31,16 +31,35 @@ public final class DefaultFieldsEncryptionConfig implements FieldsEncryptionConf private static final String CONFIG_PATH = "encryption"; private final boolean isEncryptionEnabled; private final String symmetricalKey; + private final String oldSymmetricalKey; private final List jsonPointers; private DefaultFieldsEncryptionConfig(final ConfigWithFallback config) { this.isEncryptionEnabled = config.getBoolean(ConfigValue.ENCRYPTION_ENABLED.getConfigPath()); 
this.symmetricalKey = config.getString(ConfigValue.SYMMETRICAL_KEY.getConfigPath()); + this.oldSymmetricalKey = config.getString(ConfigValue.OLD_SYMMETRICAL_KEY.getConfigPath()); this.jsonPointers = Collections.unmodifiableList( new ArrayList<>(config.getStringList(ConfigValue.JSON_POINTERS.getConfigPath()))); - if (isEncryptionEnabled && symmetricalKey.trim().isEmpty()) { - throw new DittoConfigError("Missing Symmetric key. It is mandatory when encryption is enabled for connections!"); + + validateConfiguration(); + } + + private void validateConfiguration() { + final boolean hasSymmetricalKey = !symmetricalKey.trim().isEmpty(); + final boolean hasOldKey = !oldSymmetricalKey.trim().isEmpty(); + + // When encryption is enabled, we must have a current encryption key + if (isEncryptionEnabled && !hasSymmetricalKey) { + throw new DittoConfigError( + "Missing 'symmetrical-key'. It is mandatory when encryption is enabled for connections!"); + } + + // If both keys are set, they must be different + if (hasSymmetricalKey && hasOldKey && symmetricalKey.equals(oldSymmetricalKey)) { + throw new DittoConfigError( + "Configuration error: 'symmetrical-key' and 'old-symmetrical-key' must be different! " + + "If you're not rotating keys, remove 'old-symmetrical-key'."); } } @@ -61,6 +80,11 @@ public String getSymmetricalKey() { return this.symmetricalKey; } + @Override + public Optional getOldSymmetricalKey() { + return oldSymmetricalKey.trim().isEmpty() ? 
Optional.empty() : Optional.of(oldSymmetricalKey); + } + @Override public List getJsonPointers() { return this.jsonPointers; @@ -77,12 +101,13 @@ public boolean equals(final Object o) { final DefaultFieldsEncryptionConfig that = (DefaultFieldsEncryptionConfig) o; return isEncryptionEnabled == that.isEncryptionEnabled && Objects.equals(symmetricalKey, that.symmetricalKey) && + Objects.equals(oldSymmetricalKey, that.oldSymmetricalKey) && Objects.equals(jsonPointers, that.jsonPointers); } @Override public int hashCode() { - return Objects.hash(isEncryptionEnabled, symmetricalKey, jsonPointers); + return Objects.hash(isEncryptionEnabled, symmetricalKey, oldSymmetricalKey, jsonPointers); } @Override @@ -90,6 +115,7 @@ public String toString() { return getClass().getSimpleName() + "[" + "enabled=" + isEncryptionEnabled + ", symmetricalKey='***'" + + ", oldSymmetricalKey='" + (oldSymmetricalKey.trim().isEmpty() ? "not set" : "***") + "'" + ", jsonPointers=" + jsonPointers + ']'; } diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java index 70a88106a44..f20f144e311 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java @@ -14,6 +14,7 @@ package org.eclipse.ditto.connectivity.service.config; import java.util.List; +import java.util.Optional; import org.eclipse.ditto.internal.utils.config.KnownConfigValue; @@ -32,11 +33,29 @@ public interface FieldsEncryptionConfig { /** - * Returns the symmetricalKey used for encryption. - * @return the symmetricalKey + * Returns the current symmetrical key used for encryption. + * This is THE key used for encrypting new data. 
+ * + * @return the current symmetrical key */ String getSymmetricalKey(); + /** + * Returns the old symmetrical key used for decryption fallback during key rotation. + * When set, the system will try to decrypt with the current key first, and fallback to this key if decryption fails. + *

+ * Typical usage during key rotation:
+ * <ol>
+ *   <li>Move current key to old-symmetrical-key</li>
+ *   <li>Set new key as symmetrical-key</li>
+ *   <li>Trigger migration via DevOps command</li>
+ *   <li>Remove old-symmetrical-key after migration completes</li>
+ * </ol>
+ * + * @return the old symmetrical key, empty if not configured + */ + Optional getOldSymmetricalKey(); + /** * Returns string json pointers to the values of json fields to be encrypted. @@ -57,11 +76,20 @@ enum ConfigValue implements KnownConfigValue { * Determines whether json value encryption is enabled. */ ENCRYPTION_ENABLED("encryption-enabled", false), + /** - * The symmetrical key used for encryption. + * The current symmetrical key used for encryption. + * This is THE key used for encrypting all new data. */ SYMMETRICAL_KEY("symmetrical-key", ""), + /** + * The old symmetrical key used for decryption fallback during key rotation. + * When set, the system will attempt to decrypt with symmetrical-key first, + * and fallback to this key if decryption fails. + */ + OLD_SYMMETRICAL_KEY("old-symmetrical-key", ""), + /** * The pointer to the json values to be encrypted. */ diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectionMongoSnapshotAdapter.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectionMongoSnapshotAdapter.java index 407ab65163e..a6b578d60d7 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectionMongoSnapshotAdapter.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectionMongoSnapshotAdapter.java @@ -85,7 +85,7 @@ protected Connection createJsonifiableFrom(final JsonObject jsonObject) { return ConnectionMigrationUtil.connectionFromJsonWithMigration(jsonObject); } final JsonObject decrypted = JsonFieldsEncryptor.decrypt(jsonObject, "", encryptionConfig.getJsonPointers(), - encryptionConfig.getSymmetricalKey()); + encryptionConfig.getSymmetricalKey(), encryptionConfig.getOldSymmetricalKey()); return ConnectionMigrationUtil.connectionFromJsonWithMigration(decrypted); } diff --git 
a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectivityMongoEventAdapter.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectivityMongoEventAdapter.java index d4e092fc6c7..a15137b336f 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectivityMongoEventAdapter.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/ConnectivityMongoEventAdapter.java @@ -74,7 +74,8 @@ protected JsonObjectBuilder performToJournalMigration(final Event event, fina @Override protected JsonObject performFromJournalMigration(final JsonObject jsonObject) { return JsonFieldsEncryptor.decrypt(jsonObject, ConnectivityConstants.ENTITY_TYPE.toString(), - encryptionConfig.getJsonPointers(), encryptionConfig.getSymmetricalKey()); + encryptionConfig.getJsonPointers(), encryptionConfig.getSymmetricalKey(), + encryptionConfig.getOldSymmetricalKey()); } private static EventRegistry> createEventRegistry() { diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java index fc617f226e8..501971f764e 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java @@ -21,9 +21,9 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import org.eclipse.ditto.connectivity.model.ConnectionConfigurationInvalidException; import org.eclipse.ditto.connectivity.model.ConnectionUriInvalidException; import org.eclipse.ditto.connectivity.service.util.EncryptorAesGcm; -import 
org.eclipse.ditto.internal.utils.config.DittoConfigError; import org.eclipse.ditto.json.JsonFactory; import org.eclipse.ditto.json.JsonObject; import org.eclipse.ditto.json.JsonPointer; @@ -56,9 +56,7 @@ public final class JsonFieldsEncryptor { * @param jsonPointers the pointer to the values to be encrypted * @param symmetricKey the symmetrical key to be used for the encryption * @return a new encrypted {@link org.eclipse.ditto.json.JsonObject } - * @throws IllegalStateException if encryption fails. - * It is not advisable to catch it as in the case of failure it is not expected for the service to continue to work - * without encryption being properly configured. + * @throws ConnectionConfigurationInvalidException if encryption fails due to an invalid key configuration */ public static JsonObject encrypt(final JsonObject jsonObject, final String pointersPrefix, final List jsonPointers, final String symmetricKey) { @@ -74,16 +72,36 @@ public static JsonObject encrypt(final JsonObject jsonObject, final String point * @param pointersPrefix a prefix to be added if needed to all pointers. * Use empty string if not needed else can be a string starting or not with "/" * @param jsonPointers the pointer to the values to be decrypted - * @param symmetricKey the symmetrical key to be used for the description + * @param symmetricKey the symmetrical key to be used for the decryption * @return a new decrypted {@link org.eclipse.ditto.json.JsonObject } - * @throws IllegalStateException if decryption fails. - * It is not advisable to catch it as in the case of failure it is not expected for the service to continue to work - * without encryption being properly configured. 
+ * @throws ConnectionConfigurationInvalidException if decryption fails due to an invalid key configuration */ - public static JsonObject decrypt(final JsonObject jsonObject, final String pointersPrefix, final List jsonPointers, - final String symmetricKey) { - return handle(jsonObject, prefixPointers(pointersPrefix, jsonPointers).map(JsonPointer::of) - .collect(Collectors.toList()), symmetricKey, JsonFieldsEncryptor::decryptValue); + public static JsonObject decrypt(final JsonObject jsonObject, final String pointersPrefix, + final List jsonPointers, final String symmetricKey) { + return decrypt(jsonObject, pointersPrefix, jsonPointers, symmetricKey, Optional.empty()); + } + + /** + * Decrypts json object fields with fallback key support for key rotation. + * Tries the current key first; if decryption fails and an old key is provided, retries with the old key. + * Only fields prefixed with {@value ENCRYPTED_PREFIX} prefix will be decrypted even if configured with a pointer. + * + * @param jsonObject the jsonObject whose fields should be decrypted + * @param pointersPrefix a prefix to be added if needed to all pointers. 
+ * Use empty string if not needed else can be a string starting or not with "/" + * @param jsonPointers the pointer to the values to be decrypted + * @param symmetricKey the current symmetrical key + * @param oldSymmetricKey optional old key for fallback decryption during key rotation + * @return a new decrypted {@link org.eclipse.ditto.json.JsonObject} + * @throws ConnectionConfigurationInvalidException if decryption fails with all available keys + */ + public static JsonObject decrypt(final JsonObject jsonObject, final String pointersPrefix, + final List jsonPointers, final String symmetricKey, + final Optional oldSymmetricKey) { + return handle(jsonObject, + prefixPointers(pointersPrefix, jsonPointers).map(JsonPointer::of).collect(Collectors.toList()), + symmetricKey, + (value, key) -> decryptValueWithFallback(value, key, oldSymmetricKey)); } static String replaceUriPassword(final String uriStringRepresentation, final String patchedPassword) { @@ -124,10 +142,16 @@ private static JsonObject createPatch(final JsonPointer pointer, final String ol LOGGER.trace("<{}> value is not a uri, will encrypt whole value.", pointer); final String encryptedValue = encryptionHandler.apply(oldValue, symmetricKey); return JsonFactory.newObject(pointer, JsonValue.of(encryptedValue)); - } catch (Exception ise) { - LOGGER.warn("{} of connection value at <{}> failed", ise.getMessage(), pointer, ise); - throw new IllegalStateException( - String.format("%s of connection value at <%s> failed", ise.getMessage(), pointer), ise); + } catch (final ConnectionConfigurationInvalidException e) { + throw e; + } catch (final Exception e) { + LOGGER.warn("{} of connection value at <{}> failed", e.getMessage(), pointer, e); + throw ConnectionConfigurationInvalidException.newBuilder( + String.format("%s of connection field at <%s> failed. 
" + + "Verify that the configured encryption key matches the key used to encrypt the data.", + e.getMessage(), pointer)) + .cause(e) + .build(); } } @@ -136,18 +160,59 @@ private static String decryptValue(final String value, final String symmetricKey final String striped = value.replace(ENCRYPTED_PREFIX, ""); try { return EncryptorAesGcm.decryptWithPrefixIV(striped, symmetricKey); - } catch (Exception e) { - throw new IllegalStateException("Decryption", e); + } catch (final Exception e) { + throw ConnectionConfigurationInvalidException.newBuilder( + "Decryption of connection field failed. " + + "Verify that the configured encryption key matches the key used to encrypt the data.") + .cause(e) + .build(); } } return value; } + private static String decryptValueWithFallback(final String value, final String symmetricKey, + final Optional oldSymmetricKey) { + if (!value.startsWith(ENCRYPTED_PREFIX)) { + return value; + } + + final String stripped = value.replace(ENCRYPTED_PREFIX, ""); + + // Try current key first + try { + return EncryptorAesGcm.decryptWithPrefixIV(stripped, symmetricKey); + } catch (final Exception currentKeyException) { + // Current key failed — try old key if available + if (oldSymmetricKey.isPresent()) { + LOGGER.debug("Decryption with current key failed, trying old key as fallback"); + try { + return EncryptorAesGcm.decryptWithPrefixIV(stripped, oldSymmetricKey.get()); + } catch (final Exception oldKeyException) { + throw ConnectionConfigurationInvalidException.newBuilder( + "Decryption of connection field failed with both current and old keys. " + + "Verify that the configured encryption keys match the keys used to encrypt the data.") + .cause(oldKeyException) + .build(); + } + } + throw ConnectionConfigurationInvalidException.newBuilder( + "Decryption of connection field failed. 
" + + "Verify that the configured encryption key matches the key used to encrypt the data.") + .cause(currentKeyException) + .build(); + } + } + private static String encryptValue(final String value, final String symmetricKey) { try { return ENCRYPTED_PREFIX + EncryptorAesGcm.encryptWithPrefixIV(value, symmetricKey); - } catch (Exception e) { - throw new IllegalStateException("Encryption", e); + } catch (final Exception e) { + throw ConnectionConfigurationInvalidException.newBuilder( + "Encryption of connection field failed. " + + "Verify that the configured encryption key is a valid 256-bit AES key.") + .cause(e) + .build(); } } diff --git a/connectivity/service/src/main/resources/connectivity.conf b/connectivity/service/src/main/resources/connectivity.conf index 743274e01b6..feadf7e0144 100644 --- a/connectivity/service/src/main/resources/connectivity.conf +++ b/connectivity/service/src/main/resources/connectivity.conf @@ -213,10 +213,24 @@ ditto { encryption-enabled = false encryption-enabled = ${?CONNECTIVITY_CONNECTION_ENCRYPTION_ENABLED} - # The 256bit AES symmetrical key used for encription + # The 256bit AES symmetrical key used for encryption. + # This is THE current key used for encrypting all new data. symmetrical-key = "" symmetrical-key = ${?CONNECTIVITY_CONNECTION_ENCRYPTION_KEY} + # The old 256bit AES symmetrical key used for decryption fallback during key rotation. + # When set, the system will try to decrypt with symmetrical-key first, + # and fallback to this key if decryption fails. + # + # Key rotation workflow: + # 1. Move current symmetrical-key value to old-symmetrical-key + # 2. Set new key as symmetrical-key + # 3. Restart connectivity service (new writes use new key, reads fallback to old key) + # 4. Trigger migration via DevOps command to re-encrypt existing data + # 5. 
Remove old-symmetrical-key after migration completes + old-symmetrical-key = "" + old-symmetrical-key = ${?CONNECTIVITY_CONNECTION_OLD_ENCRYPTION_KEY} + # A List of Json pointers to the fields to be encrypted json-pointers = ["/uri", "/credentials/key", diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptorTest.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptorTest.java index 3b295fd1dcd..53bf2c97296 100644 --- a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptorTest.java +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptorTest.java @@ -14,6 +14,7 @@ package org.eclipse.ditto.connectivity.service.messaging.persistence; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import java.security.NoSuchAlgorithmException; @@ -21,8 +22,10 @@ import java.util.Optional; import org.assertj.core.api.JUnitSoftAssertions; +import org.eclipse.ditto.connectivity.model.ConnectionConfigurationInvalidException; import org.eclipse.ditto.connectivity.service.config.DefaultFieldsEncryptionConfig; import org.eclipse.ditto.connectivity.service.config.FieldsEncryptionConfig; +import org.eclipse.ditto.connectivity.service.util.EncryptorAesGcm; import org.eclipse.ditto.json.JsonObject; import org.eclipse.ditto.json.JsonValue; import org.junit.BeforeClass; @@ -35,6 +38,9 @@ public class JsonFieldsEncryptorTest { public static String SYMMETRICAL_KEY; + private static String KEY_A; + private static String KEY_B; + private static String KEY_C; private static FieldsEncryptionConfig TEST_CONFIG; @Rule @@ -45,6 +51,9 @@ public static void initTestFixture() throws NoSuchAlgorithmException { final Config config = 
ConfigFactory.load("connection-fields-encryption-test"); TEST_CONFIG = DefaultFieldsEncryptionConfig.of(config.getConfig("connection")); SYMMETRICAL_KEY = TEST_CONFIG.getSymmetricalKey(); + KEY_A = EncryptorAesGcm.generateAESKeyAsString(); + KEY_B = EncryptorAesGcm.generateAESKeyAsString(); + KEY_C = EncryptorAesGcm.generateAESKeyAsString(); } @Test @@ -93,6 +102,72 @@ public void replacePasswordInUriUserInfoWithQueryParamSameAaPwd() { assertEquals(expectedUri , patchedUri); } + @Test + public void decryptWithFallbackKeySucceeds() { + final List pointers = List.of("/credentials/password"); + final JsonObject original = JsonObject.of("{\"credentials\": {\"password\": \"secret123\"}}"); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(original, "", pointers, KEY_A); + + // Decrypt with KEY_B as current (will fail), KEY_A as fallback (should succeed) + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(encrypted, "", pointers, KEY_B, Optional.of(KEY_A)); + assertEquals(original, decrypted); + } + + @Test + public void decryptWithCurrentKeyDoesNotNeedFallback() { + final List pointers = List.of("/credentials/password"); + final JsonObject original = JsonObject.of("{\"credentials\": {\"password\": \"secret123\"}}"); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(original, "", pointers, KEY_A); + + // Decrypt with KEY_A as current — succeeds without fallback + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(encrypted, "", pointers, KEY_A, Optional.of(KEY_B)); + assertEquals(original, decrypted); + } + + @Test + public void decryptWithBothKeysWrongThrows() { + final List pointers = List.of("/credentials/password"); + final JsonObject original = JsonObject.of("{\"credentials\": {\"password\": \"secret123\"}}"); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(original, "", pointers, KEY_A); + + // Both KEY_B and KEY_C are wrong — should throw + assertThrows(ConnectionConfigurationInvalidException.class, () -> + 
JsonFieldsEncryptor.decrypt(encrypted, "", pointers, KEY_B, Optional.of(KEY_C))); + } + + @Test + public void decryptWithNoFallbackIsBackwardCompatible() { + final List pointers = List.of("/credentials/password"); + final JsonObject original = JsonObject.of("{\"credentials\": {\"password\": \"secret123\"}}"); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(original, "", pointers, KEY_A); + + // Old 4-arg API still works + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(encrypted, "", pointers, KEY_A); + assertEquals(original, decrypted); + } + + @Test + public void unencryptedValuePassesThroughWithFallback() { + final List pointers = List.of("/credentials/password"); + final JsonObject plain = JsonObject.of("{\"credentials\": {\"password\": \"plaintext\"}}"); + + // Value without "encrypted_" prefix is returned as-is + final JsonObject result = JsonFieldsEncryptor.decrypt(plain, "", pointers, KEY_A, Optional.of(KEY_B)); + assertEquals(plain, result); + } + + @Test + public void decryptUriWithFallbackKeySucceeds() { + final List pointers = List.of("/uri"); + final JsonObject original = JsonObject.of( + "{\"uri\": \"amqps://user:passwordValue@host:5671\"}"); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(original, "", pointers, KEY_A); + + // URI password should decrypt via fallback + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(encrypted, "", pointers, KEY_B, Optional.of(KEY_A)); + assertEquals(original, decrypted); + } + /** * Connection is not valid as it includes conflicting entries. 
Only for test purposes */ diff --git a/connectivity/service/src/test/resources/connection-fields-encryption-dual-key-test.conf b/connectivity/service/src/test/resources/connection-fields-encryption-dual-key-test.conf new file mode 100644 index 00000000000..3a9afad0ed5 --- /dev/null +++ b/connectivity/service/src/test/resources/connection-fields-encryption-dual-key-test.conf @@ -0,0 +1,61 @@ +# Test configuration for dual-key encryption support + +# Example 1: Normal operation with single key (standard usage) +normal-connection { + encryption { + encryption-enabled = true + symmetrical-key = "vJFSTPE9PO2BtZlcMAwNjs8jdFvQCk0Ya9MVdYjRJUU=" + json-pointers = ["/uri", "/credentials/password"] + } +} + +# Example 2: During key rotation (dual-key configuration) +# Old data encrypted with old-symmetrical-key, new data encrypted with symmetrical-key +key-rotation-connection { + encryption { + encryption-enabled = true + symmetrical-key = "anotherBase64Key256bitsLongForAESGCM32Bytes=" + old-symmetrical-key = "vJFSTPE9PO2BtZlcMAwNjs8jdFvQCk0Ya9MVdYjRJUU=" + json-pointers = ["/uri", "/credentials/password"] + } +} + +# Example 3: Encryption disabled (no encryption, no old key) +disabled-encryption-connection { + encryption { + encryption-enabled = false + json-pointers = ["/uri", "/credentials/password"] + } +} + +# Example 4: Encryption disabled with old key (for reading old encrypted data) +# Useful during migration from encrypted to unencrypted state +disabled-with-old-key-connection { + encryption { + encryption-enabled = false + symmetrical-key = "" + old-symmetrical-key = "vJFSTPE9PO2BtZlcMAwNjs8jdFvQCk0Ya9MVdYjRJUU=" + json-pointers = ["/uri", "/credentials/password"] + } +} + +# Example 5: Invalid configuration - should fail validation +# (encryption enabled but no key) +invalid-no-key-connection { + encryption { + encryption-enabled = true + symmetrical-key = "" + json-pointers = ["/uri", "/credentials/password"] + } +} + +# Example 6: Invalid configuration - should 
fail validation +# (both keys are the same) +invalid-same-keys-connection { + encryption { + encryption-enabled = true + symmetrical-key = "vJFSTPE9PO2BtZlcMAwNjs8jdFvQCk0Ya9MVdYjRJUU=" + old-symmetrical-key = "vJFSTPE9PO2BtZlcMAwNjs8jdFvQCk0Ya9MVdYjRJUU=" + json-pointers = ["/uri", "/credentials/password"] + } +} From f402a2739229e335265334cf0c7ad2d0d7fd2a10 Mon Sep 17 00:00:00 2001 From: Aleksandar Stanchev Date: Sun, 15 Feb 2026 22:30:05 +0200 Subject: [PATCH 2/5] Add encryption key migration via piggyback commands Signed-off-by: Aleksandar Stanchev --- .../service/ConnectivityRootActor.java | 4 + .../config/DefaultFieldsEncryptionConfig.java | 23 +- .../config/FieldsEncryptionConfig.java | 27 +- .../persistence/EncryptionMigrationActor.java | 876 ++++++++++++++++++ .../MigrateConnectionEncryption.java | 183 ++++ .../MigrateConnectionEncryptionAbort.java | 131 +++ ...rateConnectionEncryptionAbortResponse.java | 228 +++++ .../MigrateConnectionEncryptionResponse.java | 302 ++++++ .../MigrateConnectionEncryptionStatus.java | 131 +++ ...ateConnectionEncryptionStatusResponse.java | 301 ++++++ .../src/main/resources/connectivity.conf | 12 + ...ivityServiceGlobalCommandRegistryTest.java | 8 +- ...viceGlobalCommandResponseRegistryTest.java | 8 +- .../mqtt/hivemq/MqttClientActorIT.java | 1 + .../EncryptionMigrationActorTest.java | 421 +++++++++ 15 files changed, 2651 insertions(+), 5 deletions(-) create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java create mode 100644 
connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java create mode 100644 connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java index aae8a64f9cc..0001d98b691 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java @@ -33,6 +33,7 @@ import org.eclipse.ditto.connectivity.service.messaging.persistence.ConnectionPersistenceOperationsActor; import org.eclipse.ditto.connectivity.service.messaging.persistence.ConnectionPersistenceStreamingActorCreator; import org.eclipse.ditto.connectivity.service.messaging.persistence.ConnectionSupervisorActor; +import org.eclipse.ditto.connectivity.service.messaging.persistence.EncryptionMigrationActor; import org.eclipse.ditto.edge.service.dispatching.EdgeCommandForwarderActor; import org.eclipse.ditto.edge.service.dispatching.ShardRegions; import org.eclipse.ditto.internal.utils.cluster.ClusterUtil; @@ -116,6 +117,9 @@ private ConnectivityRootActor(final ConnectivityConfig connectivityConfig, 
ConnectionPersistenceOperationsActor.props(pubSubMediator, connectivityConfig.getMongoDbConfig(), config, connectivityConfig.getPersistenceOperationsConfig())); + startChildActor(EncryptionMigrationActor.ACTOR_NAME, + EncryptionMigrationActor.props(connectivityConfig)); + RootChildActorStarter.get(actorSystem, ScopedConfig.dittoExtension(config)).execute(getContext()); diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java index 9ee8722dd47..49e3eeee8fe 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java @@ -33,6 +33,8 @@ public final class DefaultFieldsEncryptionConfig implements FieldsEncryptionConf private final String symmetricalKey; private final String oldSymmetricalKey; private final List jsonPointers; + private final int migrationBatchSize; + private final int migrationMaxDocumentsPerMinute; private DefaultFieldsEncryptionConfig(final ConfigWithFallback config) { @@ -41,6 +43,8 @@ private DefaultFieldsEncryptionConfig(final ConfigWithFallback config) { this.oldSymmetricalKey = config.getString(ConfigValue.OLD_SYMMETRICAL_KEY.getConfigPath()); this.jsonPointers = Collections.unmodifiableList( new ArrayList<>(config.getStringList(ConfigValue.JSON_POINTERS.getConfigPath()))); + this.migrationBatchSize = config.getInt(ConfigValue.MIGRATION_BATCH_SIZE.getConfigPath()); + this.migrationMaxDocumentsPerMinute = config.getInt(ConfigValue.MIGRATION_MAX_DOCUMENTS_PER_MINUTE.getConfigPath()); validateConfiguration(); } @@ -90,6 +94,16 @@ public List getJsonPointers() { return this.jsonPointers; } + @Override + public int getMigrationBatchSize() { + return migrationBatchSize; + } + + 
@Override + public int getMigrationMaxDocumentsPerMinute() { + return migrationMaxDocumentsPerMinute; + } + @Override public boolean equals(final Object o) { if (this == o) { @@ -102,12 +116,15 @@ public boolean equals(final Object o) { return isEncryptionEnabled == that.isEncryptionEnabled && Objects.equals(symmetricalKey, that.symmetricalKey) && Objects.equals(oldSymmetricalKey, that.oldSymmetricalKey) && - Objects.equals(jsonPointers, that.jsonPointers); + Objects.equals(jsonPointers, that.jsonPointers) && + migrationBatchSize == that.migrationBatchSize && + migrationMaxDocumentsPerMinute == that.migrationMaxDocumentsPerMinute; } @Override public int hashCode() { - return Objects.hash(isEncryptionEnabled, symmetricalKey, oldSymmetricalKey, jsonPointers); + return Objects.hash(isEncryptionEnabled, symmetricalKey, oldSymmetricalKey, jsonPointers, + migrationBatchSize, migrationMaxDocumentsPerMinute); } @Override @@ -117,6 +134,8 @@ public String toString() { ", symmetricalKey='***'" + ", oldSymmetricalKey='" + (oldSymmetricalKey.trim().isEmpty() ? "not set" : "***") + "'" + ", jsonPointers=" + jsonPointers + + ", migrationBatchSize=" + migrationBatchSize + + ", migrationMaxDocumentsPerMinute=" + migrationMaxDocumentsPerMinute + ']'; } diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java index f20f144e311..76baa630e93 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java @@ -65,7 +65,20 @@ public interface FieldsEncryptionConfig { */ List getJsonPointers(); + /** + * Returns the batch size for the encryption migration process. 
+ * + * @return the batch size + */ + int getMigrationBatchSize(); + /** + * Returns the maximum number of documents to migrate per minute. + * This throttles the migration stream to avoid overwhelming the database. + * + * @return the maximum documents per minute, 0 means no throttling + */ + int getMigrationMaxDocumentsPerMinute(); /** * An enumeration of the known config path expressions and their associated default values for {@code FieldsEncryptionConfig}. @@ -103,7 +116,19 @@ enum ConfigValue implements KnownConfigValue { "/credentials/parameters/sharedKey", "/credentials/clientSecret", "/credentials/password" - )); + )), + + /** + * The batch size for the encryption migration process. + */ + MIGRATION_BATCH_SIZE("migration.batch-size", 100), + + /** + * The maximum number of documents to migrate per minute. + * This throttles the migration stream to avoid overwhelming the database. + * 0 means no throttling. + */ + MIGRATION_MAX_DOCUMENTS_PER_MINUTE("migration.max-documents-per-minute", 100); private final String configPath; private final Object defaultValue; diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java new file mode 100644 index 00000000000..b58005f5803 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java @@ -0,0 +1,876 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.net.URI; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletionStage; +import java.util.stream.Collectors; + +import javax.annotation.Nullable; + +import org.apache.pekko.NotUsed; +import org.apache.pekko.actor.AbstractActor; +import org.apache.pekko.actor.ActorRef; +import org.apache.pekko.actor.Props; +import org.apache.pekko.actor.Status; +import org.apache.pekko.japi.pf.ReceiveBuilder; +import org.apache.pekko.stream.KillSwitches; +import org.apache.pekko.stream.Materializer; +import org.apache.pekko.stream.SharedKillSwitch; +import org.apache.pekko.stream.javadsl.Sink; +import org.apache.pekko.stream.javadsl.Source; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import org.eclipse.ditto.connectivity.model.ConnectionConfigurationInvalidException; +import org.eclipse.ditto.connectivity.service.config.ConnectivityConfig; +import org.eclipse.ditto.connectivity.service.config.FieldsEncryptionConfig; +import org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson; +import org.eclipse.ditto.internal.utils.persistence.mongo.MongoClientWrapper; +import org.eclipse.ditto.internal.utils.persistence.mongo.config.MongoDbConfig; +import org.eclipse.ditto.json.JsonObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.ReplaceOneModel; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.Sorts; +import 
com.mongodb.client.model.WriteModel; +import com.mongodb.reactivestreams.client.MongoCollection; + +/** + * Actor that performs encryption key migration for persisted connection data. + *

+ * Reads snapshots and journal events from MongoDB, decrypts with the old key, + * re-encrypts with the new key, and batch-updates documents using Pekko Streams. + * Supports dry-run, resumption, and progress tracking. + */ +public final class EncryptionMigrationActor extends AbstractActor { + + /** + * The name of this actor. + */ + public static final String ACTOR_NAME = "encryptionMigration"; + + private static final Logger LOG = LoggerFactory.getLogger(EncryptionMigrationActor.class); + + private static final String SNAPSHOT_COLLECTION = "connection_snaps"; + private static final String JOURNAL_COLLECTION = "connection_journal"; + private static final String PROGRESS_COLLECTION = "connection_encryption_migration"; + + private static final String PHASE_SNAPSHOTS = "snapshots"; + private static final String PHASE_JOURNAL = "journal"; + private static final String PHASE_COMPLETED = "completed"; + + private static final String PROGRESS_ID = "current"; + + // MongoDB field names for pekko-persistence-mongodb + private static final String SNAPSHOT_SERIALIZED_FIELD = "s2"; + private static final String JOURNAL_EVENTS_FIELD = "events"; + private static final String JOURNAL_PAYLOAD_FIELD = "p"; + private static final String ID_FIELD = "_id"; + + // Entity type prefix used for journal event encryption + private static final String JOURNAL_ENTITY_TYPE_PREFIX = "connection"; + // Snapshot encryption uses empty prefix + private static final String SNAPSHOT_ENTITY_TYPE_PREFIX = ""; + + private final MongoClientWrapper mongoClient; + private final FieldsEncryptionConfig encryptionConfig; + private final Materializer materializer; + private final MongoCollection snapshotCollection; + private final MongoCollection journalCollection; + private final MongoCollection progressCollection; + private final int batchSize; + private final int maxDocumentsPerMinute; + + private boolean migrationInProgress = false; + private boolean currentDryRun = false; + private volatile boolean 
abortRequested = false; + @Nullable + private SharedKillSwitch activeKillSwitch; + @Nullable + private volatile MigrationProgress currentProgress; + + @SuppressWarnings("unused") + private EncryptionMigrationActor(final ConnectivityConfig connectivityConfig) { + final MongoDbConfig mongoDbConfig = connectivityConfig.getMongoDbConfig(); + this.encryptionConfig = connectivityConfig.getConnectionConfig().getFieldsEncryptionConfig(); + this.mongoClient = MongoClientWrapper.newInstance(mongoDbConfig); + this.materializer = Materializer.createMaterializer(this::getContext); + this.batchSize = encryptionConfig.getMigrationBatchSize(); + this.maxDocumentsPerMinute = encryptionConfig.getMigrationMaxDocumentsPerMinute(); + + final var db = mongoClient.getDefaultDatabase(); + this.snapshotCollection = db.getCollection(SNAPSHOT_COLLECTION); + this.journalCollection = db.getCollection(JOURNAL_COLLECTION); + this.progressCollection = db.getCollection(PROGRESS_COLLECTION); + } + + /** + * Creates Props for this actor. + * + * @param connectivityConfig the connectivity configuration. + * @return the Props. 
+ */ + public static Props props(final ConnectivityConfig connectivityConfig) { + return Props.create(EncryptionMigrationActor.class, connectivityConfig); + } + + @Override + public void postStop() throws Exception { + mongoClient.close(); + super.postStop(); + } + + @Override + public Receive createReceive() { + return ReceiveBuilder.create() + .match(MigrateConnectionEncryption.class, this::handleMigration) + .match(MigrateConnectionEncryptionAbort.class, this::handleAbort) + .match(MigrateConnectionEncryptionStatus.class, this::handleStatus) + .build(); + } + + private void handleStatus(final MigrateConnectionEncryptionStatus command) { + final ActorRef sender = getSender(); + final MigrationProgress inMemory = currentProgress; + if (inMemory != null) { + // Use in-memory progress (available during and after migration, including dry-run) + final String phase = migrationInProgress + ? "in_progress:" + inMemory.phase + : inMemory.phase; + sender.tell(MigrateConnectionEncryptionStatusResponse.of( + phase, + inMemory.snapshotsProcessed, inMemory.snapshotsSkipped, inMemory.snapshotsFailed, + inMemory.journalProcessed, inMemory.journalSkipped, inMemory.journalFailed, + inMemory.lastProcessedSnapshotId, inMemory.lastProcessedSnapshotPid, + inMemory.lastProcessedJournalId, inMemory.lastProcessedJournalPid, + inMemory.startedAt, Instant.now().toString(), + currentDryRun, + migrationInProgress, + command.getDittoHeaders()), getSelf()); + } else { + // Fall back to MongoDB (e.g. 
after service restart) + loadProgress().whenComplete((optProgress, error) -> { + if (error != null) { + sender.tell(new Status.Failure(error), getSelf()); + } else { + final MigrationProgress progress = optProgress.orElseGet(MigrationProgress::new); + sender.tell(MigrateConnectionEncryptionStatusResponse.of( + progress.phase, + progress.snapshotsProcessed, progress.snapshotsSkipped, progress.snapshotsFailed, + progress.journalProcessed, progress.journalSkipped, progress.journalFailed, + progress.lastProcessedSnapshotId, progress.lastProcessedSnapshotPid, + progress.lastProcessedJournalId, progress.lastProcessedJournalPid, + progress.startedAt, Instant.now().toString(), + false, + false, + command.getDittoHeaders()), getSelf()); + } + }); + } + } + + private void handleAbort(final MigrateConnectionEncryptionAbort command) { + final ActorRef sender = getSender(); + + if (!migrationInProgress) { + sender.tell(new Status.Failure(new IllegalStateException( + "No migration is currently running.")), getSelf()); + return; + } + + LOG.info("Abort requested for running migration"); + abortRequested = true; + if (activeKillSwitch != null) { + activeKillSwitch.shutdown(); + } + + final MigrationProgress progress = currentProgress != null ? currentProgress : new MigrationProgress(); + sender.tell(MigrateConnectionEncryptionAbortResponse.of( + "aborted:" + progress.phase, + progress.snapshotsProcessed, progress.snapshotsSkipped, progress.snapshotsFailed, + progress.journalProcessed, progress.journalSkipped, progress.journalFailed, + Instant.now().toString(), + command.getDittoHeaders()), getSelf()); + } + + private void handleMigration(final MigrateConnectionEncryption command) { + final ActorRef sender = getSender(); + + if (migrationInProgress) { + sender.tell(new Status.Failure(new IllegalStateException( + "Migration already in progress. 
Use migrateEncryptionStatus to check progress " + + "or migrateEncryptionAbort to cancel.")), + getSelf()); + return; + } + + // Migration Logic: + // - Encryption enabled + both keys set → Key rotation (decrypt with old, encrypt with new) + // - Encryption enabled + only current key → Initial encryption (encrypt plaintext with key) + // - Encryption disabled + old key set → Disable workflow (decrypt with old, write plaintext) + // - Encryption disabled + no keys → Error (cannot migrate) + + final Optional oldKeyOpt = encryptionConfig.getOldSymmetricalKey(); + final boolean isDisableWorkflow = !encryptionConfig.isEncryptionEnabled() && oldKeyOpt.isPresent(); + final boolean isInitialEncryption = encryptionConfig.isEncryptionEnabled() && oldKeyOpt.isEmpty(); + + if (!encryptionConfig.isEncryptionEnabled() && !isDisableWorkflow) { + sender.tell(new Status.Failure(new IllegalStateException( + "Encryption is not enabled and no old key configured. " + + "Cannot migrate without encryption keys.")), + getSelf()); + return; + } + + final String newKey; + final String oldKey; + if (isDisableWorkflow) { + // Decrypt with old key, write plaintext + newKey = null; + oldKey = oldKeyOpt.get(); + } else if (isInitialEncryption) { + // Encrypt plaintext data with current key (no old key needed) + newKey = encryptionConfig.getSymmetricalKey(); + oldKey = null; + } else { + // Key rotation: decrypt with old key, encrypt with new key + newKey = encryptionConfig.getSymmetricalKey(); + oldKey = oldKeyOpt.get(); + } + final List pointers = encryptionConfig.getJsonPointers(); + final boolean dryRun = command.isDryRun(); + final boolean resume = command.isResume(); + + migrationInProgress = true; + currentDryRun = dryRun; + abortRequested = false; + currentProgress = null; + activeKillSwitch = KillSwitches.shared("encryption-migration"); + final String mode = isDisableWorkflow ? "disable" : isInitialEncryption ? 
"initial-encryption" : "key-rotation"; + LOG.info("Starting encryption migration (mode={}, dryRun={}, resume={})", mode, dryRun, resume); + + final CompletionStage migrationResult; + if (resume) { + migrationResult = loadProgress().thenCompose(optProgress -> { + if (optProgress.isEmpty() || PHASE_COMPLETED.equals(optProgress.get().phase)) { + // No previous migration exists or it already completed — nothing to resume + final String reason = optProgress.isEmpty() + ? "no previous migration found" : "previous migration already completed"; + LOG.info("Resume requested but {}, nothing to do", reason); + migrationInProgress = false; + final MigrationProgress completed = optProgress.orElseGet(MigrationProgress::new) + .withPhase(PHASE_COMPLETED); + currentProgress = completed; + sender.tell(MigrateConnectionEncryptionResponse.alreadyCompleted( + Instant.now().toString(), command.getDittoHeaders()), getSelf()); + return java.util.concurrent.CompletableFuture.completedFuture(completed); + } + final MigrationProgress progress = optProgress.get(); + sender.tell(MigrateConnectionEncryptionResponse.accepted( + true, Instant.now().toString(), dryRun, command.getDittoHeaders()), getSelf()); + return runMigration(progress, oldKey, newKey, pointers, dryRun); + }); + } else { + migrationResult = deleteProgress().thenCompose(v -> + runMigration(new MigrationProgress(), oldKey, newKey, pointers, dryRun)); + // Reply immediately with 202 Accepted + sender.tell(MigrateConnectionEncryptionResponse.accepted( + false, Instant.now().toString(), dryRun, command.getDittoHeaders()), getSelf()); + } + + migrationResult.whenComplete((progress, error) -> { + migrationInProgress = false; + activeKillSwitch = null; + final boolean wasAborted = abortRequested; + abortRequested = false; + if (error != null && !wasAborted) { + LOG.error("Encryption migration failed", error); + } else { + final String finalPhase = wasAborted + ? "aborted:" + (progress != null ? 
progress.phase : "unknown") + : progress.phase; + LOG.info("Encryption migration {} (dryRun={}): {}", + wasAborted ? "aborted" : "completed", dryRun, progress); + if (progress != null) { + currentProgress = progress.withPhase(finalPhase); + if (wasAborted && !dryRun) { + saveProgress(progress.withPhase("aborted")).toCompletableFuture().join(); + } + } + } + }); + } + + private CompletionStage runMigration(final MigrationProgress initialProgress, + final String oldKey, final String newKey, final List pointers, final boolean dryRun) { + + final CompletionStage afterSnapshots; + if (PHASE_JOURNAL.equals(initialProgress.phase)) { + // Resume from journal phase — snapshots already done + afterSnapshots = java.util.concurrent.CompletableFuture.completedFuture(initialProgress); + } else if (PHASE_COMPLETED.equals(initialProgress.phase)) { + return java.util.concurrent.CompletableFuture.completedFuture(initialProgress); + } else { + afterSnapshots = migrateSnapshots(initialProgress, oldKey, newKey, pointers, dryRun); + } + + return afterSnapshots.thenCompose(progress -> { + if (abortRequested) { + return java.util.concurrent.CompletableFuture.completedFuture(progress); + } + final MigrationProgress journalProgress = progress.withPhase(PHASE_JOURNAL); + return migrateJournal(journalProgress, oldKey, newKey, pointers, dryRun); + }).thenCompose(progress -> { + if (abortRequested) { + return java.util.concurrent.CompletableFuture.completedFuture(progress); + } + final MigrationProgress completed = progress.withPhase(PHASE_COMPLETED); + if (!dryRun) { + return saveProgress(completed).thenApply(v -> completed); + } + return java.util.concurrent.CompletableFuture.completedFuture(completed); + }); + } + + private CompletionStage migrateSnapshots(final MigrationProgress progress, + final String oldKey, final String newKey, final List pointers, final boolean dryRun) { + + LOG.info("Starting snapshot migration (dryRun={}, throttling={} docs/min)", dryRun, + maxDocumentsPerMinute > 0 
? maxDocumentsPerMinute : "disabled"); + final Bson resumeFilter = progress.lastProcessedSnapshotId != null + ? Filters.gt(ID_FIELD, progress.lastProcessedSnapshotId) + : Filters.empty(); + final Bson encryptableFieldsFilter = buildEncryptableFieldsFilter( + SNAPSHOT_SERIALIZED_FIELD, SNAPSHOT_ENTITY_TYPE_PREFIX, pointers); + final Bson filter = Filters.and(resumeFilter, encryptableFieldsFilter); + + final Source source = Source.fromPublisher( + snapshotCollection.find(filter) + .sort(Sorts.ascending(ID_FIELD)) + .batchSize(batchSize)); + + final Source throttledSource = applyThrottling(source, maxDocumentsPerMinute); + + return throttledSource + .via(activeKillSwitch.flow()) + .grouped(batchSize) + .runWith(Sink.fold(progress, (currentProgress, batch) -> + processSnapshotBatch(currentProgress, batch, oldKey, newKey, pointers, dryRun)), + materializer) + .thenApply(finalProgress -> { + LOG.info("Snapshot migration {}: processed={}, skipped={}, failed={}", + abortRequested ? "aborted" : "done", + finalProgress.snapshotsProcessed, finalProgress.snapshotsSkipped, + finalProgress.snapshotsFailed); + return finalProgress; + }); + } + + private MigrationProgress processSnapshotBatch(final MigrationProgress progress, + final List batch, final String oldKey, final String newKey, + final List pointers, final boolean dryRun) { + + MigrationProgress currentProgress = progress; + final List> writeModels = new ArrayList<>(); + + for (final Document doc : batch) { + final String docId = doc.get(ID_FIELD).toString(); + final String pid = doc.getString("pid"); + try { + final Document s2 = doc.get(SNAPSHOT_SERIALIZED_FIELD, Document.class); + if (s2 == null) { + currentProgress = currentProgress.incrementSnapshotsSkipped(); + // Update last processed ID and PID even when skipped + currentProgress = currentProgress.withLastSnapshotId(docId); + currentProgress = currentProgress.withLastSnapshotPid(pid); + continue; + } + + final BsonDocument bsonDoc = 
s2.toBsonDocument(Document.class, + com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); + final JsonObject jsonObject = DittoBsonJson.getInstance().serialize(bsonDoc); + + final JsonObject reEncrypted = reEncryptFields(jsonObject, SNAPSHOT_ENTITY_TYPE_PREFIX, + pointers, oldKey, newKey); + + if (reEncrypted == null) { + // Already encrypted with new key + currentProgress = currentProgress.incrementSnapshotsSkipped(); + } else { + if (!dryRun) { + final BsonDocument newBson = DittoBsonJson.getInstance().parse(reEncrypted); + doc.put(SNAPSHOT_SERIALIZED_FIELD, Document.parse(newBson.toJson())); + writeModels.add(new ReplaceOneModel<>( + Filters.eq(ID_FIELD, doc.get(ID_FIELD)), + doc)); + } + currentProgress = currentProgress.incrementSnapshotsProcessed(); + } + } catch (final Exception e) { + LOG.warn("Failed to process snapshot {} (pid={}): {}", docId, pid, e.getMessage()); + currentProgress = currentProgress.incrementSnapshotsFailed(); + } + // Update last processed ID and PID for EVERY document + currentProgress = currentProgress.withLastSnapshotId(docId); + currentProgress = currentProgress.withLastSnapshotPid(pid); + } + + // Perform bulk write if there are changes + if (!dryRun && !writeModels.isEmpty()) { + try { + Source.fromPublisher(snapshotCollection.bulkWrite(writeModels, + new BulkWriteOptions().ordered(false))) + .runWith(Sink.head(), materializer) + .toCompletableFuture().join(); + } catch (final Exception e) { + LOG.error("Bulk write failed for snapshot batch: {}", e.getMessage()); + // Continue to save progress even if bulk write fails + } + } + + // Save progress to MongoDB for non-dry-run; always update in-memory for status queries + if (!dryRun) { + final MigrationProgress progressToSave = currentProgress.withPhase(PHASE_SNAPSHOTS); + try { + saveProgress(progressToSave).toCompletableFuture().join(); + } catch (final Exception e) { + LOG.error("Failed to save progress after snapshot batch: {}", e.getMessage()); + } + } + 
this.currentProgress = currentProgress; + + return currentProgress; + } + + private CompletionStage migrateJournal(final MigrationProgress progress, + final String oldKey, final String newKey, final List pointers, final boolean dryRun) { + + LOG.info("Starting journal migration (dryRun={}, throttling={} docs/min)", dryRun, + maxDocumentsPerMinute > 0 ? maxDocumentsPerMinute : "disabled"); + final Bson resumeFilter = progress.lastProcessedJournalId != null + ? Filters.gt(ID_FIELD, new ObjectId(progress.lastProcessedJournalId)) + : Filters.empty(); + final Bson encryptableFieldsFilter = buildEncryptableFieldsFilter( + JOURNAL_EVENTS_FIELD + "." + JOURNAL_PAYLOAD_FIELD, + JOURNAL_ENTITY_TYPE_PREFIX, pointers); + final Bson filter = Filters.and(resumeFilter, encryptableFieldsFilter); + + final Source source = Source.fromPublisher( + journalCollection.find(filter) + .sort(Sorts.ascending(ID_FIELD)) + .batchSize(batchSize)); + + final Source throttledSource = applyThrottling(source, maxDocumentsPerMinute); + + return throttledSource + .via(activeKillSwitch.flow()) + .grouped(batchSize) + .runWith(Sink.fold(progress, (currentProgress, batch) -> + processJournalBatch(currentProgress, batch, oldKey, newKey, pointers, dryRun)), + materializer) + .thenApply(finalProgress -> { + LOG.info("Journal migration {}: processed={}, skipped={}, failed={}", + abortRequested ? 
"aborted" : "done", + finalProgress.journalProcessed, finalProgress.journalSkipped, + finalProgress.journalFailed); + return finalProgress; + }); + } + + private MigrationProgress processJournalBatch(final MigrationProgress progress, + final List batch, final String oldKey, final String newKey, + final List pointers, final boolean dryRun) { + + MigrationProgress currentProgress = progress; + final List> writeModels = new ArrayList<>(); + + for (final Document doc : batch) { + final Object docId = doc.get(ID_FIELD); + final String docIdStr = docId.toString(); + final String pid = doc.getString("pid"); + try { + final List events = doc.getList(JOURNAL_EVENTS_FIELD, Document.class); + if (events == null || events.isEmpty()) { + currentProgress = currentProgress.incrementJournalSkipped(); + // Update last processed ID and PID even when skipped + currentProgress = currentProgress.withLastJournalId(docIdStr); + currentProgress = currentProgress.withLastJournalPid(pid); + continue; + } + + boolean anyChanged = false; + final List updatedEvents = new ArrayList<>(events.size()); + + for (final Document event : events) { + final Document payload = event.get(JOURNAL_PAYLOAD_FIELD, Document.class); + if (payload == null) { + updatedEvents.add(event); + continue; + } + + final BsonDocument bsonPayload = payload.toBsonDocument(Document.class, + com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); + final JsonObject jsonPayload = DittoBsonJson.getInstance().serialize(bsonPayload); + + final JsonObject reEncrypted = reEncryptFields(jsonPayload, JOURNAL_ENTITY_TYPE_PREFIX, + pointers, oldKey, newKey); + + if (reEncrypted != null) { + if (!dryRun) { + final BsonDocument newBson = DittoBsonJson.getInstance().parse(reEncrypted); + event.put(JOURNAL_PAYLOAD_FIELD, Document.parse(newBson.toJson())); + } + anyChanged = true; + } + updatedEvents.add(event); + } + + if (anyChanged) { + if (!dryRun) { + doc.put(JOURNAL_EVENTS_FIELD, updatedEvents); + writeModels.add(new 
ReplaceOneModel<>( + Filters.eq(ID_FIELD, docId), + doc)); + } + currentProgress = currentProgress.incrementJournalProcessed(); + } else { + currentProgress = currentProgress.incrementJournalSkipped(); + } + } catch (final Exception e) { + LOG.warn("Failed to process journal document {} (pid={}): {}", docIdStr, pid, e.getMessage()); + currentProgress = currentProgress.incrementJournalFailed(); + } + // Update last processed ID and PID for EVERY document + currentProgress = currentProgress.withLastJournalId(docIdStr); + currentProgress = currentProgress.withLastJournalPid(pid); + } + + // Perform bulk write if there are changes + if (!dryRun && !writeModels.isEmpty()) { + try { + Source.fromPublisher(journalCollection.bulkWrite(writeModels, + new BulkWriteOptions().ordered(false))) + .runWith(Sink.head(), materializer) + .toCompletableFuture().join(); + } catch (final Exception e) { + LOG.error("Bulk write failed for journal batch: {}", e.getMessage()); + // Continue to save progress even if bulk write fails + } + } + + // Save progress to MongoDB for non-dry-run; always update in-memory for status queries + if (!dryRun) { + final MigrationProgress progressToSave = currentProgress.withPhase(PHASE_JOURNAL); + try { + saveProgress(progressToSave).toCompletableFuture().join(); + } catch (final Exception e) { + LOG.error("Failed to save progress after journal batch: {}", e.getMessage()); + } + } + this.currentProgress = currentProgress; + + return currentProgress; + } + + /** + * Applies throttling to the source stream if throttling is enabled (maxDocsPerMinute > 0). + * Throttling is implemented using Pekko Streams throttle operator. 
+ * + * @param source the source stream + * @param maxDocsPerMinute maximum documents per minute, 0 means no throttling + * @return throttled source if enabled, original source otherwise + */ + private Source applyThrottling(final Source source, + final int maxDocsPerMinute) { + if (maxDocsPerMinute <= 0) { + return source; + } + + // Throttle directly using the configured docs/minute rate. + // Pekko Streams throttle uses a token-bucket algorithm, so bursts up to maxDocsPerMinute + // are allowed as long as the average rate stays within the limit. + return source.throttle(maxDocsPerMinute, java.time.Duration.ofMinutes(1)); + } + + /** + * Re-encrypts fields in a JSON object based on the migration mode. + * + *

Supports three modes:
+ * <ul>
+ *   <li>Initial encryption ({@code oldKey == null}): encrypt plaintext with newKey</li>
+ *   <li>Key rotation (both keys set): decrypt with oldKey, encrypt with newKey</li>
+ *   <li>Disable encryption ({@code newKey == null}): decrypt with oldKey, write plaintext</li>
+ * </ul>
+ * + * @param oldKey the old encryption key, or {@code null} for initial encryption (plaintext data) + * @param newKey the new encryption key, or {@code null} to disable encryption (write plaintext) + * @return the transformed JSON object, or {@code null} if already in the desired state (skip). + */ + static JsonObject reEncryptFields(final JsonObject jsonObject, final String entityTypePrefix, + final List pointers, @Nullable final String oldKey, @Nullable final String newKey) { + + if (oldKey == null && newKey != null) { + // Initial encryption: data is plaintext, encrypt with new key + // Check if any field already has the encrypted_ prefix — if so, skip + final boolean alreadyEncrypted = pointers.stream() + .map(p -> entityTypePrefix + p) + .map(org.eclipse.ditto.json.JsonPointer::of) + .flatMap(pointer -> jsonObject.getValue(pointer).stream()) + .filter(org.eclipse.ditto.json.JsonValue::isString) + .anyMatch(v -> containsEncryptedValue(v.asString())); + if (alreadyEncrypted) { + return null; + } + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(jsonObject, entityTypePrefix, pointers, newKey); + // Skip if encrypt produced no changes (e.g. no matching pointers in this entity) + return encrypted.equals(jsonObject) ? null : encrypted; + } + + // Key rotation or disable workflow — oldKey must be set + // Try decrypting with the old key + try { + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(jsonObject, entityTypePrefix, + pointers, oldKey); + + if (newKey == null) { + // Disable workflow: return decrypted plaintext, but skip if nothing changed + // (decrypt silently passes through plaintext values, so unchanged means already plain) + return decrypted.equals(jsonObject) ? null : decrypted; + } else { + // Key rotation: re-encrypt with new key + final JsonObject reEncrypted = JsonFieldsEncryptor.encrypt(decrypted, entityTypePrefix, pointers, newKey); + // Skip if the result is identical (e.g. 
no matching pointers in this entity) + return reEncrypted.equals(jsonObject) ? null : reEncrypted; + } + } catch (final ConnectionConfigurationInvalidException e) { + // Old key failed — try new key to see if already migrated + // (Only applicable for key rotation, not disable workflow) + if (newKey == null) { + // Disable workflow: if old key fails, data is already plaintext - skip + return null; + } + + try { + JsonFieldsEncryptor.decrypt(jsonObject, entityTypePrefix, pointers, newKey); + // Already encrypted with new key — skip + return null; + } catch (final ConnectionConfigurationInvalidException e2) { + // Both keys failed — data might be plaintext, try encrypting directly + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(jsonObject, entityTypePrefix, pointers, newKey); + return encrypted.equals(jsonObject) ? null : encrypted; + } + } + } + + /** + * Checks whether a string value contains an encrypted portion — either as a direct + * {@code encrypted_} prefix (non-URI fields) or embedded in the password part of a URI. + */ + private static boolean containsEncryptedValue(final String value) { + if (value.startsWith(JsonFieldsEncryptor.ENCRYPTED_PREFIX)) { + return true; + } + try { + final URI uri = new URI(value); + if (uri.getScheme() != null && uri.getRawUserInfo() != null) { + final String[] userPass = uri.getRawUserInfo().split(":", 2); + return userPass.length == 2 && + userPass[1].startsWith(JsonFieldsEncryptor.ENCRYPTED_PREFIX); + } + } catch (final Exception ignored) { + // Not a valid URI — fall through + } + return false; + } + + /** + * Builds a MongoDB filter that matches only documents containing at least one of the + * encryptable fields. This avoids fetching documents (e.g. empty events) that have + * no fields to encrypt/decrypt. + * + * @param documentPrefix the BSON path prefix to the document (e.g. "s2" for snapshots, + * "events.p" for journal payloads) + * @param entityTypePrefix the entity type prefix applied to pointers (e.g. 
"connection" for journal) + * @param pointers the configured JSON pointers to encrypt + * @return a Bson filter requiring at least one encryptable field to exist + */ + private static Bson buildEncryptableFieldsFilter(final String documentPrefix, + final String entityTypePrefix, final List pointers) { + final String prefix = entityTypePrefix.isEmpty() + ? documentPrefix + : documentPrefix + "." + entityTypePrefix; + final List existsFilters = pointers.stream() + .map(pointer -> pointer.replace("/", ".")) + .map(dotPath -> Filters.exists(prefix + dotPath)) + .collect(Collectors.toList()); + return Filters.or(existsFilters); + } + + private CompletionStage saveProgress(final MigrationProgress progress) { + final Document progressDoc = new Document() + .append(ID_FIELD, PROGRESS_ID) + .append("phase", progress.phase) + .append("lastProcessedSnapshotId", progress.lastProcessedSnapshotId) + .append("lastProcessedSnapshotPid", progress.lastProcessedSnapshotPid) + .append("lastProcessedJournalId", progress.lastProcessedJournalId) + .append("lastProcessedJournalPid", progress.lastProcessedJournalPid) + .append("snapshotsProcessed", progress.snapshotsProcessed) + .append("snapshotsSkipped", progress.snapshotsSkipped) + .append("snapshotsFailed", progress.snapshotsFailed) + .append("journalProcessed", progress.journalProcessed) + .append("journalSkipped", progress.journalSkipped) + .append("journalFailed", progress.journalFailed) + .append("startedAt", progress.startedAt) + .append("updatedAt", Instant.now().toString()); + + return Source.fromPublisher( + progressCollection.replaceOne( + Filters.eq(ID_FIELD, PROGRESS_ID), + progressDoc, + new ReplaceOptions().upsert(true))) + .runWith(Sink.ignore(), materializer) + .thenApply(done -> null); + } + + private CompletionStage> loadProgress() { + return Source.fromPublisher( + progressCollection.find(Filters.eq(ID_FIELD, PROGRESS_ID)).first()) + .runWith(Sink.headOption(), materializer) + .thenApply(optDoc -> optDoc.map(doc -> { 
+ final MigrationProgress progress = new MigrationProgress(); + progress.phase = doc.getString("phase"); + progress.lastProcessedSnapshotId = doc.getString("lastProcessedSnapshotId"); + progress.lastProcessedSnapshotPid = doc.getString("lastProcessedSnapshotPid"); + progress.lastProcessedJournalId = doc.getString("lastProcessedJournalId"); + progress.lastProcessedJournalPid = doc.getString("lastProcessedJournalPid"); + progress.snapshotsProcessed = doc.getLong("snapshotsProcessed") != null + ? doc.getLong("snapshotsProcessed") : 0L; + progress.snapshotsSkipped = doc.getLong("snapshotsSkipped") != null + ? doc.getLong("snapshotsSkipped") : 0L; + progress.snapshotsFailed = doc.getLong("snapshotsFailed") != null + ? doc.getLong("snapshotsFailed") : 0L; + progress.journalProcessed = doc.getLong("journalProcessed") != null + ? doc.getLong("journalProcessed") : 0L; + progress.journalSkipped = doc.getLong("journalSkipped") != null + ? doc.getLong("journalSkipped") : 0L; + progress.journalFailed = doc.getLong("journalFailed") != null + ? doc.getLong("journalFailed") : 0L; + progress.startedAt = doc.getString("startedAt"); + return progress; + })); + } + + private CompletionStage deleteProgress() { + return Source.fromPublisher( + progressCollection.deleteOne(Filters.eq(ID_FIELD, PROGRESS_ID))) + .runWith(Sink.ignore(), materializer) + .thenApply(done -> null); + } + + /** + * Mutable progress tracker for the migration process. 
+ */ + static final class MigrationProgress { + String phase = PHASE_SNAPSHOTS; + String lastProcessedSnapshotId; + String lastProcessedSnapshotPid; + String lastProcessedJournalId; + String lastProcessedJournalPid; + long snapshotsProcessed; + long snapshotsSkipped; + long snapshotsFailed; + long journalProcessed; + long journalSkipped; + long journalFailed; + String startedAt = Instant.now().toString(); + + MigrationProgress withPhase(final String newPhase) { + this.phase = newPhase; + return this; + } + + MigrationProgress withLastSnapshotId(final String id) { + this.lastProcessedSnapshotId = id; + return this; + } + + MigrationProgress withLastSnapshotPid(final String pid) { + this.lastProcessedSnapshotPid = pid; + return this; + } + + MigrationProgress withLastJournalId(final String id) { + this.lastProcessedJournalId = id; + return this; + } + + MigrationProgress withLastJournalPid(final String pid) { + this.lastProcessedJournalPid = pid; + return this; + } + + MigrationProgress incrementSnapshotsProcessed() { + this.snapshotsProcessed++; + return this; + } + + MigrationProgress incrementSnapshotsSkipped() { + this.snapshotsSkipped++; + return this; + } + + MigrationProgress incrementSnapshotsFailed() { + this.snapshotsFailed++; + return this; + } + + MigrationProgress incrementJournalProcessed() { + this.journalProcessed++; + return this; + } + + MigrationProgress incrementJournalSkipped() { + this.journalSkipped++; + return this; + } + + MigrationProgress incrementJournalFailed() { + this.journalFailed++; + return this; + } + + @Override + public String toString() { + return "MigrationProgress[" + + "phase=" + phase + + ", snapshots(processed=" + snapshotsProcessed + + ", skipped=" + snapshotsSkipped + + ", failed=" + snapshotsFailed + ")" + + ", journal(processed=" + journalProcessed + + ", skipped=" + journalSkipped + + ", failed=" + journalFailed + ")" + + "]"; + } + } + +} diff --git 
a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java new file mode 100644 index 00000000000..51e8763d9b4 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.util.Objects; +import java.util.function.Predicate; + +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.base.model.json.JsonParsableCommand; +import org.eclipse.ditto.base.model.json.JsonSchemaVersion; +import org.eclipse.ditto.base.model.signals.commands.AbstractCommand; +import org.eclipse.ditto.base.model.signals.commands.CommandJsonDeserializer; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonField; +import org.eclipse.ditto.json.JsonFieldDefinition; +import org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonObjectBuilder; +import org.eclipse.ditto.json.JsonPointer; + +/** + * Command to trigger re-encryption of all persisted connection data (snapshots and journal events) + * from the old encryption key to the new encryption key. + *

+ * This command is sent via the DevOps piggyback endpoint to the {@link EncryptionMigrationActor}. + */ +@Immutable +@JsonParsableCommand(typePrefix = MigrateConnectionEncryption.TYPE_PREFIX, name = MigrateConnectionEncryption.NAME) +public final class MigrateConnectionEncryption extends AbstractCommand { + + static final String TYPE_PREFIX = "connectivity.commands:"; + static final String NAME = "migrateEncryption"; + + /** + * Type of this command. + */ + public static final String TYPE = TYPE_PREFIX + NAME; + + static final JsonFieldDefinition JSON_DRY_RUN = + JsonFactory.newBooleanFieldDefinition("dryRun", JsonSchemaVersion.V_2); + + static final JsonFieldDefinition JSON_RESUME = + JsonFactory.newBooleanFieldDefinition("resume", JsonSchemaVersion.V_2); + + private static final String RESOURCE_TYPE = "connectivity"; + + private final boolean dryRun; + private final boolean resume; + + private MigrateConnectionEncryption(final boolean dryRun, final boolean resume, + final DittoHeaders dittoHeaders) { + super(TYPE, dittoHeaders); + this.dryRun = dryRun; + this.resume = resume; + } + + /** + * Creates a new {@code MigrateConnectionEncryption} command. + * + * @param dryRun whether to only count affected documents without making changes. + * @param resume whether to resume from last saved progress. + * @param dittoHeaders the headers of the command. + * @return the command. + */ + public static MigrateConnectionEncryption of(final boolean dryRun, final boolean resume, + final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryption(dryRun, resume, dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryption} from a JSON object. + * + * @param jsonObject the JSON object of which the command is to be created. + * @param dittoHeaders the headers of the command. + * @return the command. 
+ */ + public static MigrateConnectionEncryption fromJson(final JsonObject jsonObject, final DittoHeaders dittoHeaders) { + return new CommandJsonDeserializer(TYPE, jsonObject).deserialize( + () -> { + final boolean dryRun = jsonObject.getValue(JSON_DRY_RUN).orElse(false); + final boolean resume = jsonObject.getValue(JSON_RESUME).orElse(false); + return of(dryRun, resume, dittoHeaders); + }); + } + + /** + * Returns whether this is a dry-run (count only, no changes). + * + * @return {@code true} if dry-run. + */ + public boolean isDryRun() { + return dryRun; + } + + /** + * Returns whether to resume from last saved progress. + * + * @return {@code true} if resuming. + */ + public boolean isResume() { + return resume; + } + + @Override + protected void appendPayload(final JsonObjectBuilder jsonObjectBuilder, final JsonSchemaVersion schemaVersion, + final Predicate thePredicate) { + final Predicate predicate = schemaVersion.and(thePredicate); + jsonObjectBuilder.set(JSON_DRY_RUN, dryRun, predicate); + jsonObjectBuilder.set(JSON_RESUME, resume, predicate); + } + + @Override + public String getTypePrefix() { + return TYPE_PREFIX; + } + + @Override + public Category getCategory() { + return Category.MODIFY; + } + + @Override + public MigrateConnectionEncryption setDittoHeaders(final DittoHeaders dittoHeaders) { + return of(dryRun, resume, dittoHeaders); + } + + @Override + public JsonPointer getResourcePath() { + return JsonPointer.empty(); + } + + @Override + public String getResourceType() { + return RESOURCE_TYPE; + } + + @Override + protected boolean canEqual(@Nullable final Object other) { + return other instanceof MigrateConnectionEncryption; + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + final MigrateConnectionEncryption that = (MigrateConnectionEncryption) o; + return dryRun == 
that.dryRun && resume == that.resume; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), dryRun, resume); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " [" + + super.toString() + + ", dryRun=" + dryRun + + ", resume=" + resume + + "]"; + } + +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java new file mode 100644 index 00000000000..7cd5106d6bf --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.util.function.Predicate; + +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.base.model.json.JsonParsableCommand; +import org.eclipse.ditto.base.model.json.JsonSchemaVersion; +import org.eclipse.ditto.base.model.signals.commands.AbstractCommand; +import org.eclipse.ditto.base.model.signals.commands.CommandJsonDeserializer; +import org.eclipse.ditto.json.JsonField; +import org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonObjectBuilder; +import org.eclipse.ditto.json.JsonPointer; + +/** + * Command to abort a currently running encryption migration. + *

+ * If no migration is running, responds with an error. Otherwise cancels the running stream + * after the current batch, saves progress, and responds with the progress at the time of abort. + */ +@Immutable +@JsonParsableCommand(typePrefix = MigrateConnectionEncryptionAbort.TYPE_PREFIX, + name = MigrateConnectionEncryptionAbort.NAME) +public final class MigrateConnectionEncryptionAbort extends AbstractCommand { + + static final String TYPE_PREFIX = "connectivity.commands:"; + static final String NAME = "migrateEncryptionAbort"; + + /** + * Type of this command. + */ + public static final String TYPE = TYPE_PREFIX + NAME; + + private static final String RESOURCE_TYPE = "connectivity"; + + private MigrateConnectionEncryptionAbort(final DittoHeaders dittoHeaders) { + super(TYPE, dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionAbort} command. + * + * @param dittoHeaders the headers of the command. + * @return the command. + */ + public static MigrateConnectionEncryptionAbort of(final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryptionAbort(dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionAbort} from a JSON object. + * + * @param jsonObject the JSON object of which the command is to be created. + * @param dittoHeaders the headers of the command. + * @return the command. 
+ */ + public static MigrateConnectionEncryptionAbort fromJson(final JsonObject jsonObject, + final DittoHeaders dittoHeaders) { + return new CommandJsonDeserializer(TYPE, jsonObject) + .deserialize(() -> of(dittoHeaders)); + } + + @Override + protected void appendPayload(final JsonObjectBuilder jsonObjectBuilder, final JsonSchemaVersion schemaVersion, + final Predicate thePredicate) { + // no payload fields + } + + @Override + public String getTypePrefix() { + return TYPE_PREFIX; + } + + @Override + public Category getCategory() { + return Category.MODIFY; + } + + @Override + public MigrateConnectionEncryptionAbort setDittoHeaders(final DittoHeaders dittoHeaders) { + return of(dittoHeaders); + } + + @Override + public JsonPointer getResourcePath() { + return JsonPointer.empty(); + } + + @Override + public String getResourceType() { + return RESOURCE_TYPE; + } + + @Override + protected boolean canEqual(@Nullable final Object other) { + return other instanceof MigrateConnectionEncryptionAbort; + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return super.equals(o); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " [" + super.toString() + "]"; + } + +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java new file mode 100644 index 00000000000..3398ea5ced7 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See 
the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.util.Collections; +import java.util.Objects; +import java.util.function.Predicate; + +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +import org.eclipse.ditto.base.model.common.HttpStatus; +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.base.model.json.JsonParsableCommandResponse; +import org.eclipse.ditto.base.model.json.JsonSchemaVersion; +import org.eclipse.ditto.base.model.signals.commands.AbstractCommandResponse; +import org.eclipse.ditto.base.model.signals.commands.CommandResponseHttpStatusValidator; +import org.eclipse.ditto.base.model.signals.commands.CommandResponseJsonDeserializer; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonField; +import org.eclipse.ditto.json.JsonFieldDefinition; +import org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonObjectBuilder; +import org.eclipse.ditto.json.JsonPointer; + +/** + * Response to a {@link MigrateConnectionEncryptionAbort} command containing final migration state. + */ +@Immutable +@JsonParsableCommandResponse(type = MigrateConnectionEncryptionAbortResponse.TYPE) +public final class MigrateConnectionEncryptionAbortResponse + extends AbstractCommandResponse { + + static final String TYPE_PREFIX = "connectivity.responses:"; + + /** + * Type of this response. 
+ */ + public static final String TYPE = TYPE_PREFIX + MigrateConnectionEncryptionAbort.NAME; + + static final JsonFieldDefinition JSON_PHASE = + JsonFactory.newStringFieldDefinition("phase", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_SNAPSHOTS = + JsonFactory.newJsonObjectFieldDefinition("snapshots", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_JOURNAL_EVENTS = + JsonFactory.newJsonObjectFieldDefinition("journalEvents", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_ABORTED_AT = + JsonFactory.newStringFieldDefinition("abortedAt", JsonSchemaVersion.V_2); + + private static final String RESOURCE_TYPE = "connectivity"; + private static final HttpStatus HTTP_STATUS = HttpStatus.OK; + + private static final CommandResponseJsonDeserializer JSON_DESERIALIZER = + CommandResponseJsonDeserializer.newInstance(TYPE, + context -> { + final JsonObject jsonObject = context.getJsonObject(); + return new MigrateConnectionEncryptionAbortResponse( + jsonObject.getValueOrThrow(JSON_PHASE), + jsonObject.getValueOrThrow(JSON_SNAPSHOTS), + jsonObject.getValueOrThrow(JSON_JOURNAL_EVENTS), + jsonObject.getValueOrThrow(JSON_ABORTED_AT), + context.getDeserializedHttpStatus(), + context.getDittoHeaders()); + }); + + private final String phase; + private final JsonObject snapshots; + private final JsonObject journalEvents; + private final String abortedAt; + + private MigrateConnectionEncryptionAbortResponse(final String phase, + final JsonObject snapshots, + final JsonObject journalEvents, + final String abortedAt, + final HttpStatus httpStatus, + final DittoHeaders dittoHeaders) { + super(TYPE, + CommandResponseHttpStatusValidator.validateHttpStatus(httpStatus, + Collections.singleton(HTTP_STATUS), + MigrateConnectionEncryptionAbortResponse.class), + dittoHeaders); + this.phase = phase; + this.snapshots = snapshots; + this.journalEvents = journalEvents; + this.abortedAt = abortedAt; + } + + /** + * Creates a new {@code 
MigrateConnectionEncryptionAbortResponse}. + * + * @param phase the current migration phase at time of abort. + * @param snapshotsProcessed number of snapshots processed. + * @param snapshotsSkipped number of snapshots skipped. + * @param snapshotsFailed number of snapshots that failed. + * @param journalProcessed number of journal documents processed. + * @param journalSkipped number of journal documents skipped. + * @param journalFailed number of journal documents that failed. + * @param abortedAt when migration was aborted (ISO-8601 timestamp). + * @param dittoHeaders the headers. + * @return the response. + */ + public static MigrateConnectionEncryptionAbortResponse of(final String phase, + final long snapshotsProcessed, final long snapshotsSkipped, final long snapshotsFailed, + final long journalProcessed, final long journalSkipped, final long journalFailed, + final String abortedAt, + final DittoHeaders dittoHeaders) { + + final JsonObject snapshots = JsonFactory.newObjectBuilder() + .set("processed", snapshotsProcessed) + .set("skipped", snapshotsSkipped) + .set("failed", snapshotsFailed) + .build(); + final JsonObject journal = JsonFactory.newObjectBuilder() + .set("processed", journalProcessed) + .set("skipped", journalSkipped) + .set("failed", journalFailed) + .build(); + return new MigrateConnectionEncryptionAbortResponse(phase, snapshots, journal, abortedAt, + HTTP_STATUS, dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionAbortResponse} from a JSON object. + * + * @param jsonObject the JSON object. + * @param dittoHeaders the headers. + * @return the response. + */ + public static MigrateConnectionEncryptionAbortResponse fromJson(final JsonObject jsonObject, + final DittoHeaders dittoHeaders) { + return JSON_DESERIALIZER.deserialize(jsonObject, dittoHeaders); + } + + /** + * Returns the current migration phase. + * + * @return the phase string. 
+ */ + public String getPhase() { + return phase; + } + + /** + * Returns when the migration was aborted. + * + * @return the ISO-8601 timestamp. + */ + public String getAbortedAt() { + return abortedAt; + } + + @Override + protected void appendPayload(final JsonObjectBuilder jsonObjectBuilder, + final JsonSchemaVersion schemaVersion, + final Predicate thePredicate) { + final Predicate predicate = schemaVersion.and(thePredicate); + jsonObjectBuilder.set(JSON_PHASE, phase, predicate); + jsonObjectBuilder.set(JSON_SNAPSHOTS, snapshots, predicate); + jsonObjectBuilder.set(JSON_JOURNAL_EVENTS, journalEvents, predicate); + jsonObjectBuilder.set(JSON_ABORTED_AT, abortedAt, predicate); + } + + @Override + public JsonPointer getResourcePath() { + return JsonPointer.empty(); + } + + @Override + public String getResourceType() { + return RESOURCE_TYPE; + } + + @Override + public MigrateConnectionEncryptionAbortResponse setDittoHeaders(final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryptionAbortResponse(phase, snapshots, journalEvents, abortedAt, + HTTP_STATUS, dittoHeaders); + } + + @Override + protected boolean canEqual(@Nullable final Object other) { + return other instanceof MigrateConnectionEncryptionAbortResponse; + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + final MigrateConnectionEncryptionAbortResponse that = (MigrateConnectionEncryptionAbortResponse) o; + return Objects.equals(phase, that.phase) && + Objects.equals(snapshots, that.snapshots) && + Objects.equals(journalEvents, that.journalEvents) && + Objects.equals(abortedAt, that.abortedAt); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), phase, snapshots, journalEvents, abortedAt); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " [" + + 
super.toString() + + ", phase=" + phase + + ", snapshots=" + snapshots + + ", journalEvents=" + journalEvents + + ", abortedAt=" + abortedAt + + "]"; + } + +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java new file mode 100644 index 00000000000..f9c0845e569 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.util.Objects; +import java.util.Set; +import java.util.function.Predicate; + +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +import org.eclipse.ditto.base.model.common.HttpStatus; +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.base.model.json.JsonParsableCommandResponse; +import org.eclipse.ditto.base.model.json.JsonSchemaVersion; +import org.eclipse.ditto.base.model.signals.commands.AbstractCommandResponse; +import org.eclipse.ditto.base.model.signals.commands.CommandResponseHttpStatusValidator; +import org.eclipse.ditto.base.model.signals.commands.CommandResponseJsonDeserializer; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonField; +import org.eclipse.ditto.json.JsonFieldDefinition; +import 
org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonObjectBuilder; +import org.eclipse.ditto.json.JsonPointer; + +/** + * Response to a {@link MigrateConnectionEncryption} command containing migration results. + */ +@Immutable +@JsonParsableCommandResponse(type = MigrateConnectionEncryptionResponse.TYPE) +public final class MigrateConnectionEncryptionResponse + extends AbstractCommandResponse { + + static final String TYPE_PREFIX = "connectivity.responses:"; + + /** + * Type of this response. + */ + public static final String TYPE = TYPE_PREFIX + MigrateConnectionEncryption.NAME; + + static final JsonFieldDefinition JSON_PHASE = + JsonFactory.newStringFieldDefinition("phase", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_DRY_RUN = + JsonFactory.newBooleanFieldDefinition("dryRun", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_RESUMED = + JsonFactory.newBooleanFieldDefinition("resumed", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_STARTED_AT = + JsonFactory.newStringFieldDefinition("startedAt", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_SNAPSHOTS = + JsonFactory.newJsonObjectFieldDefinition("snapshots", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_JOURNAL_EVENTS = + JsonFactory.newJsonObjectFieldDefinition("journalEvents", JsonSchemaVersion.V_2); + + private static final String RESOURCE_TYPE = "connectivity"; + private static final Set ALLOWED_HTTP_STATUSES = + Set.of(HttpStatus.OK, HttpStatus.ACCEPTED); + + private static final CommandResponseJsonDeserializer JSON_DESERIALIZER = + CommandResponseJsonDeserializer.newInstance(TYPE, + context -> { + final JsonObject jsonObject = context.getJsonObject(); + return new MigrateConnectionEncryptionResponse( + jsonObject.getValueOrThrow(JSON_PHASE), + jsonObject.getValueOrThrow(JSON_DRY_RUN), + jsonObject.getValue(JSON_RESUMED).orElse(false), + jsonObject.getValue(JSON_STARTED_AT).orElse(null), + 
jsonObject.getValue(JSON_SNAPSHOTS).orElse(null), + jsonObject.getValue(JSON_JOURNAL_EVENTS).orElse(null), + context.getDeserializedHttpStatus(), + context.getDittoHeaders()); + }); + + private final String phase; + private final boolean dryRun; + private final boolean resumed; + @Nullable + private final String startedAt; + @Nullable + private final JsonObject snapshots; + @Nullable + private final JsonObject journalEvents; + + private MigrateConnectionEncryptionResponse(final String phase, + final boolean dryRun, + final boolean resumed, + @Nullable final String startedAt, + @Nullable final JsonObject snapshots, + @Nullable final JsonObject journalEvents, + final HttpStatus httpStatus, + final DittoHeaders dittoHeaders) { + super(TYPE, + CommandResponseHttpStatusValidator.validateHttpStatus(httpStatus, + ALLOWED_HTTP_STATUSES, + MigrateConnectionEncryptionResponse.class), + dittoHeaders); + this.phase = phase; + this.dryRun = dryRun; + this.resumed = resumed; + this.startedAt = startedAt; + this.snapshots = snapshots; + this.journalEvents = journalEvents; + } + + /** + * Creates a response for an accepted (async) migration - returns 202 Accepted immediately. + * The migration runs in the background; use the status command to query progress. + * + * @param resumed whether migration was resumed from previous state. + * @param startedAt when migration started (ISO-8601 timestamp), may be {@code null}. + * @param dryRun whether this is a dry-run (no changes will be made). + * @param dittoHeaders the headers. + * @return the response with HTTP 202 Accepted. 
+ */ + public static MigrateConnectionEncryptionResponse accepted( + final boolean resumed, + @Nullable final String startedAt, + final boolean dryRun, + final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryptionResponse("started", dryRun, resumed, startedAt, + null, null, HttpStatus.ACCEPTED, dittoHeaders); + } + + /** + * Creates a response indicating that a previous migration already completed and there is nothing to resume. + * Returns 200 OK with phase "already_completed". + * + * @param timestamp the current timestamp (ISO-8601). + * @param dittoHeaders the headers. + * @return the response with HTTP 200 OK. + */ + public static MigrateConnectionEncryptionResponse alreadyCompleted( + @Nullable final String timestamp, + final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryptionResponse("already_completed", false, true, timestamp, + null, null, HttpStatus.OK, dittoHeaders); + } + + /** + * Creates a response for a completed dry-run - returns 200 OK with counts. + * + * @param phase the final migration phase (typically "completed"). + * @param resumed whether migration was resumed from previous state. + * @param startedAt when migration started (ISO-8601 timestamp), may be {@code null}. + * @param snapshotsProcessed number of snapshots that would be migrated. + * @param snapshotsSkipped number of snapshots skipped (already migrated). + * @param snapshotsFailed number of snapshots that failed. + * @param journalProcessed number of journal events that would be migrated. + * @param journalSkipped number of journal events skipped (already migrated). + * @param journalFailed number of journal events that failed. + * @param dittoHeaders the headers. + * @return the response with HTTP 200 OK and counts. 
+ */ + public static MigrateConnectionEncryptionResponse dryRunCompleted(final String phase, + final boolean resumed, + @Nullable final String startedAt, + final long snapshotsProcessed, final long snapshotsSkipped, final long snapshotsFailed, + final long journalProcessed, final long journalSkipped, final long journalFailed, + final DittoHeaders dittoHeaders) { + + final JsonObject snapshotsObj = JsonFactory.newObjectBuilder() + .set("processed", snapshotsProcessed) + .set("skipped", snapshotsSkipped) + .set("failed", snapshotsFailed) + .build(); + final JsonObject journalObj = JsonFactory.newObjectBuilder() + .set("processed", journalProcessed) + .set("skipped", journalSkipped) + .set("failed", journalFailed) + .build(); + + return new MigrateConnectionEncryptionResponse(phase, true, resumed, startedAt, + snapshotsObj, journalObj, HttpStatus.OK, dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionResponse} from a JSON object. + * + * @param jsonObject the JSON object. + * @param dittoHeaders the headers. + * @return the response. + */ + public static MigrateConnectionEncryptionResponse fromJson(final JsonObject jsonObject, + final DittoHeaders dittoHeaders) { + return JSON_DESERIALIZER.deserialize(jsonObject, dittoHeaders); + } + + /** + * Returns the current migration phase. + * + * @return the phase string. + */ + public String getPhase() { + return phase; + } + + /** + * Returns whether this was a dry-run. + * + * @return {@code true} if dry-run. + */ + public boolean isDryRun() { + return dryRun; + } + + /** + * Returns whether migration was resumed. + * + * @return {@code true} if migration was resumed from previous state. 
+ */ + public boolean isResumed() { + return resumed; + } + + @Override + protected void appendPayload(final JsonObjectBuilder jsonObjectBuilder, + final JsonSchemaVersion schemaVersion, + final Predicate thePredicate) { + final Predicate predicate = schemaVersion.and(thePredicate); + jsonObjectBuilder.set(JSON_PHASE, phase, predicate); + jsonObjectBuilder.set(JSON_DRY_RUN, dryRun, predicate); + jsonObjectBuilder.set(JSON_RESUMED, resumed, predicate); + if (startedAt != null) { + jsonObjectBuilder.set(JSON_STARTED_AT, startedAt, predicate); + } + if (snapshots != null) { + jsonObjectBuilder.set(JSON_SNAPSHOTS, snapshots, predicate); + } + if (journalEvents != null) { + jsonObjectBuilder.set(JSON_JOURNAL_EVENTS, journalEvents, predicate); + } + } + + @Override + public JsonPointer getResourcePath() { + return JsonPointer.empty(); + } + + @Override + public String getResourceType() { + return RESOURCE_TYPE; + } + + @Override + public MigrateConnectionEncryptionResponse setDittoHeaders(final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryptionResponse(phase, dryRun, resumed, startedAt, + snapshots, journalEvents, getHttpStatus(), dittoHeaders); + } + + @Override + protected boolean canEqual(@Nullable final Object other) { + return other instanceof MigrateConnectionEncryptionResponse; + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + final MigrateConnectionEncryptionResponse that = (MigrateConnectionEncryptionResponse) o; + return dryRun == that.dryRun && + resumed == that.resumed && + Objects.equals(phase, that.phase) && + Objects.equals(startedAt, that.startedAt) && + Objects.equals(snapshots, that.snapshots) && + Objects.equals(journalEvents, that.journalEvents); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), phase, dryRun, resumed, 
startedAt, snapshots, journalEvents); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " [" + + super.toString() + + ", phase=" + phase + + ", dryRun=" + dryRun + + ", resumed=" + resumed + + ", startedAt=" + startedAt + + ", snapshots=" + snapshots + + ", journalEvents=" + journalEvents + + "]"; + } + +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java new file mode 100644 index 00000000000..8b64c45df09 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.util.function.Predicate; + +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.base.model.json.JsonParsableCommand; +import org.eclipse.ditto.base.model.json.JsonSchemaVersion; +import org.eclipse.ditto.base.model.signals.commands.AbstractCommand; +import org.eclipse.ditto.base.model.signals.commands.CommandJsonDeserializer; +import org.eclipse.ditto.json.JsonField; +import org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonObjectBuilder; +import org.eclipse.ditto.json.JsonPointer; + +/** + * Command to query the current status of an encryption migration. + *

+ * Returns the progress from the migration progress collection, including whether a migration + * is currently running, the current phase, and document counts. + */ +@Immutable +@JsonParsableCommand(typePrefix = MigrateConnectionEncryptionStatus.TYPE_PREFIX, + name = MigrateConnectionEncryptionStatus.NAME) +public final class MigrateConnectionEncryptionStatus extends AbstractCommand { + + static final String TYPE_PREFIX = "connectivity.commands:"; + static final String NAME = "migrateEncryptionStatus"; + + /** + * Type of this command. + */ + public static final String TYPE = TYPE_PREFIX + NAME; + + private static final String RESOURCE_TYPE = "connectivity"; + + private MigrateConnectionEncryptionStatus(final DittoHeaders dittoHeaders) { + super(TYPE, dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionStatus} command. + * + * @param dittoHeaders the headers of the command. + * @return the command. + */ + public static MigrateConnectionEncryptionStatus of(final DittoHeaders dittoHeaders) { + return new MigrateConnectionEncryptionStatus(dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionStatus} from a JSON object. + * + * @param jsonObject the JSON object of which the command is to be created. + * @param dittoHeaders the headers of the command. + * @return the command. 
+ */ + public static MigrateConnectionEncryptionStatus fromJson(final JsonObject jsonObject, + final DittoHeaders dittoHeaders) { + return new CommandJsonDeserializer(TYPE, jsonObject) + .deserialize(() -> of(dittoHeaders)); + } + + @Override + protected void appendPayload(final JsonObjectBuilder jsonObjectBuilder, final JsonSchemaVersion schemaVersion, + final Predicate thePredicate) { + // no payload fields + } + + @Override + public String getTypePrefix() { + return TYPE_PREFIX; + } + + @Override + public Category getCategory() { + return Category.QUERY; + } + + @Override + public MigrateConnectionEncryptionStatus setDittoHeaders(final DittoHeaders dittoHeaders) { + return of(dittoHeaders); + } + + @Override + public JsonPointer getResourcePath() { + return JsonPointer.empty(); + } + + @Override + public String getResourceType() { + return RESOURCE_TYPE; + } + + @Override + protected boolean canEqual(@Nullable final Object other) { + return other instanceof MigrateConnectionEncryptionStatus; + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return super.equals(o); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " [" + super.toString() + "]"; + } + +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java new file mode 100644 index 00000000000..a1eb501f52e --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + 
* See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.util.Collections; +import java.util.Objects; +import java.util.function.Predicate; + +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +import org.eclipse.ditto.base.model.common.HttpStatus; +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.base.model.json.JsonParsableCommandResponse; +import org.eclipse.ditto.base.model.json.JsonSchemaVersion; +import org.eclipse.ditto.base.model.signals.commands.AbstractCommandResponse; +import org.eclipse.ditto.base.model.signals.commands.CommandResponseHttpStatusValidator; +import org.eclipse.ditto.base.model.signals.commands.CommandResponseJsonDeserializer; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonField; +import org.eclipse.ditto.json.JsonFieldDefinition; +import org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonObjectBuilder; +import org.eclipse.ditto.json.JsonPointer; + +/** + * Response to a {@link MigrateConnectionEncryptionStatus} command containing detailed migration progress. + */ +@Immutable +@JsonParsableCommandResponse(type = MigrateConnectionEncryptionStatusResponse.TYPE) +public final class MigrateConnectionEncryptionStatusResponse + extends AbstractCommandResponse { + + static final String TYPE_PREFIX = "connectivity.responses:"; + + /** + * Type of this response. 
+ */ + public static final String TYPE = TYPE_PREFIX + MigrateConnectionEncryptionStatus.NAME; + + static final JsonFieldDefinition JSON_PHASE = + JsonFactory.newStringFieldDefinition("phase", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_SNAPSHOTS = + JsonFactory.newJsonObjectFieldDefinition("snapshots", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_JOURNAL_EVENTS = + JsonFactory.newJsonObjectFieldDefinition("journalEvents", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_PROGRESS = + JsonFactory.newJsonObjectFieldDefinition("progress", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_TIMING = + JsonFactory.newJsonObjectFieldDefinition("timing", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_DRY_RUN = + JsonFactory.newBooleanFieldDefinition("dryRun", JsonSchemaVersion.V_2); + static final JsonFieldDefinition JSON_MIGRATION_ACTIVE = + JsonFactory.newBooleanFieldDefinition("migrationActive", JsonSchemaVersion.V_2); + + private static final String RESOURCE_TYPE = "connectivity"; + private static final HttpStatus HTTP_STATUS = HttpStatus.OK; + + private static final CommandResponseJsonDeserializer JSON_DESERIALIZER = + CommandResponseJsonDeserializer.newInstance(TYPE, + context -> { + final JsonObject jsonObject = context.getJsonObject(); + return new MigrateConnectionEncryptionStatusResponse( + jsonObject.getValueOrThrow(JSON_PHASE), + jsonObject.getValueOrThrow(JSON_SNAPSHOTS), + jsonObject.getValueOrThrow(JSON_JOURNAL_EVENTS), + jsonObject.getValue(JSON_PROGRESS).orElse(null), + jsonObject.getValue(JSON_TIMING).orElse(null), + jsonObject.getValue(JSON_DRY_RUN).orElse(false), + jsonObject.getValue(JSON_MIGRATION_ACTIVE).orElse(false), + context.getDeserializedHttpStatus(), + context.getDittoHeaders()); + }); + + private final String phase; + private final JsonObject snapshots; + private final JsonObject journalEvents; + @Nullable + private final JsonObject progress; + @Nullable + 
private final JsonObject timing; + private final boolean dryRun; + private final boolean migrationActive; + + private MigrateConnectionEncryptionStatusResponse(final String phase, + final JsonObject snapshots, + final JsonObject journalEvents, + @Nullable final JsonObject progress, + @Nullable final JsonObject timing, + final boolean dryRun, + final boolean migrationActive, + final HttpStatus httpStatus, + final DittoHeaders dittoHeaders) { + super(TYPE, + CommandResponseHttpStatusValidator.validateHttpStatus(httpStatus, + Collections.singleton(HTTP_STATUS), + MigrateConnectionEncryptionStatusResponse.class), + dittoHeaders); + this.phase = phase; + this.snapshots = snapshots; + this.journalEvents = journalEvents; + this.progress = progress; + this.timing = timing; + this.dryRun = dryRun; + this.migrationActive = migrationActive; + } + + /** + * Creates a new {@code MigrateConnectionEncryptionStatusResponse}. + * + * @param phase the current migration phase. + * @param snapshotsProcessed number of snapshots processed. + * @param snapshotsSkipped number of snapshots skipped. + * @param snapshotsFailed number of snapshots that failed. + * @param journalProcessed number of journal documents processed. + * @param journalSkipped number of journal documents skipped. + * @param journalFailed number of journal documents that failed. + * @param lastProcessedSnapshotId last processed snapshot document ID, may be {@code null}. + * @param lastProcessedSnapshotPid last processed snapshot persistence ID (connection ID), may be {@code null}. + * @param lastProcessedJournalId last processed journal document ID, may be {@code null}. + * @param lastProcessedJournalPid last processed journal persistence ID (connection ID), may be {@code null}. + * @param startedAt when migration started, may be {@code null}. + * @param updatedAt when migration was last updated, may be {@code null}. + * @param dryRun whether the migration was/is a dry-run. 
+ * @param migrationActive whether migration is currently active. + * @param dittoHeaders the headers. + * @return the response. + */ + public static MigrateConnectionEncryptionStatusResponse of(final String phase, + final long snapshotsProcessed, final long snapshotsSkipped, final long snapshotsFailed, + final long journalProcessed, final long journalSkipped, final long journalFailed, + @Nullable final String lastProcessedSnapshotId, @Nullable final String lastProcessedSnapshotPid, + @Nullable final String lastProcessedJournalId, @Nullable final String lastProcessedJournalPid, + @Nullable final String startedAt, @Nullable final String updatedAt, + final boolean dryRun, + final boolean migrationActive, + final DittoHeaders dittoHeaders) { + + final JsonObject snapshots = JsonFactory.newObjectBuilder() + .set("processed", snapshotsProcessed) + .set("skipped", snapshotsSkipped) + .set("failed", snapshotsFailed) + .build(); + final JsonObject journal = JsonFactory.newObjectBuilder() + .set("processed", journalProcessed) + .set("skipped", journalSkipped) + .set("failed", journalFailed) + .build(); + + final JsonObjectBuilder progressBuilder = JsonFactory.newObjectBuilder(); + if (lastProcessedSnapshotId != null) { + progressBuilder.set("lastProcessedSnapshotId", lastProcessedSnapshotId); + } + if (lastProcessedSnapshotPid != null) { + progressBuilder.set("lastProcessedSnapshotPid", lastProcessedSnapshotPid); + } + if (lastProcessedJournalId != null) { + progressBuilder.set("lastProcessedJournalId", lastProcessedJournalId); + } + if (lastProcessedJournalPid != null) { + progressBuilder.set("lastProcessedJournalPid", lastProcessedJournalPid); + } + final JsonObject progress = progressBuilder.build(); + + final JsonObjectBuilder timingBuilder = JsonFactory.newObjectBuilder(); + if (startedAt != null) { + timingBuilder.set("startedAt", startedAt); + } + if (updatedAt != null) { + timingBuilder.set("updatedAt", updatedAt); + } + final JsonObject timing = 
timingBuilder.build(); + + return new MigrateConnectionEncryptionStatusResponse(phase, snapshots, journal, + progress.isEmpty() ? null : progress, + timing.isEmpty() ? null : timing, + dryRun, + migrationActive, + HTTP_STATUS, dittoHeaders); + } + + /** + * Creates a new {@code MigrateConnectionEncryptionStatusResponse} from a JSON object. + * + * @param jsonObject the JSON object. + * @param dittoHeaders the headers. + * @return the response. + */ + public static MigrateConnectionEncryptionStatusResponse fromJson(final JsonObject jsonObject, + final DittoHeaders dittoHeaders) { + return JSON_DESERIALIZER.deserialize(jsonObject, dittoHeaders); + } + + /** + * Returns the current migration phase. + * + * @return the phase string. + */ + public String getPhase() { + return phase; + } + + /** + * Returns whether migration is currently active. + * + * @return {@code true} if migration is active. + */ + public boolean isMigrationActive() { + return migrationActive; + } + + @Override + protected void appendPayload(final JsonObjectBuilder jsonObjectBuilder, + final JsonSchemaVersion schemaVersion, + final Predicate thePredicate) { + final Predicate predicate = schemaVersion.and(thePredicate); + jsonObjectBuilder.set(JSON_PHASE, phase, predicate); + jsonObjectBuilder.set(JSON_SNAPSHOTS, snapshots, predicate); + jsonObjectBuilder.set(JSON_JOURNAL_EVENTS, journalEvents, predicate); + if (progress != null) { + jsonObjectBuilder.set(JSON_PROGRESS, progress, predicate); + } + if (timing != null) { + jsonObjectBuilder.set(JSON_TIMING, timing, predicate); + } + jsonObjectBuilder.set(JSON_DRY_RUN, dryRun, predicate); + jsonObjectBuilder.set(JSON_MIGRATION_ACTIVE, migrationActive, predicate); + } + + @Override + public JsonPointer getResourcePath() { + return JsonPointer.empty(); + } + + @Override + public String getResourceType() { + return RESOURCE_TYPE; + } + + @Override + public MigrateConnectionEncryptionStatusResponse setDittoHeaders(final DittoHeaders dittoHeaders) { + 
return new MigrateConnectionEncryptionStatusResponse(phase, snapshots, journalEvents, + progress, timing, dryRun, migrationActive, HTTP_STATUS, dittoHeaders); + } + + @Override + protected boolean canEqual(@Nullable final Object other) { + return other instanceof MigrateConnectionEncryptionStatusResponse; + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + final MigrateConnectionEncryptionStatusResponse that = (MigrateConnectionEncryptionStatusResponse) o; + return dryRun == that.dryRun && + migrationActive == that.migrationActive && + Objects.equals(phase, that.phase) && + Objects.equals(snapshots, that.snapshots) && + Objects.equals(journalEvents, that.journalEvents) && + Objects.equals(progress, that.progress) && + Objects.equals(timing, that.timing); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), phase, snapshots, journalEvents, progress, timing, dryRun, migrationActive); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " [" + + super.toString() + + ", phase=" + phase + + ", snapshots=" + snapshots + + ", journalEvents=" + journalEvents + + ", progress=" + progress + + ", timing=" + timing + + ", dryRun=" + dryRun + + ", migrationActive=" + migrationActive + + "]"; + } + +} diff --git a/connectivity/service/src/main/resources/connectivity.conf b/connectivity/service/src/main/resources/connectivity.conf index feadf7e0144..1ca4ce25089 100644 --- a/connectivity/service/src/main/resources/connectivity.conf +++ b/connectivity/service/src/main/resources/connectivity.conf @@ -243,6 +243,18 @@ ditto { "/credentials/password" ] json-pointers = ${?CONNECTIVITY_CONNECTION_ENCRYPTION_POINTERS} + + migration { + # Batch size for the encryption migration process (documents per batch) + batch-size = 100 + batch-size = 
${?CONNECTIVITY_ENCRYPTION_MIGRATION_BATCH_SIZE} + + # Maximum number of documents to migrate per minute (throttling) + # Set to 0 to disable throttling (not recommended for production) + # Default: 200 documents per minute (safe for most deployments) + max-documents-per-minute = 200 + max-documents-per-minute = ${?CONNECTIVITY_ENCRYPTION_MIGRATION_MAX_DOCS_PER_MINUTE} + } } supervisor { diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandRegistryTest.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandRegistryTest.java index 68120856c18..e7bf55e14b8 100644 --- a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandRegistryTest.java +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandRegistryTest.java @@ -23,6 +23,9 @@ import org.eclipse.ditto.connectivity.api.commands.sudo.SudoRetrieveConnectionIdsByTag; import org.eclipse.ditto.connectivity.model.signals.commands.modify.OpenConnection; import org.eclipse.ditto.connectivity.model.signals.commands.query.RetrieveConnection; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrateConnectionEncryption; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrateConnectionEncryptionAbort; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrateConnectionEncryptionStatus; import org.eclipse.ditto.connectivity.service.messaging.persistence.stages.StagedCommand; import org.eclipse.ditto.internal.models.streaming.SudoStreamPids; import org.eclipse.ditto.internal.utils.health.RetrieveHealth; @@ -71,7 +74,10 @@ public ConnectivityServiceGlobalCommandRegistryTest() { PublishSignal.class, SudoAddConnectionLogEntry.class, SubscribeForPersistedEvents.class, - CreateWotValidationConfig.class + CreateWotValidationConfig.class, + 
MigrateConnectionEncryption.class, + MigrateConnectionEncryptionAbort.class, + MigrateConnectionEncryptionStatus.class ); } diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandResponseRegistryTest.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandResponseRegistryTest.java index a5e88d86e3e..6ddc8114514 100644 --- a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandResponseRegistryTest.java +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/ConnectivityServiceGlobalCommandResponseRegistryTest.java @@ -38,6 +38,9 @@ import org.eclipse.ditto.thingsearch.api.commands.sudo.SudoRetrieveNamespaceReportResponse; import org.eclipse.ditto.thingsearch.model.signals.commands.SearchErrorResponse; import org.eclipse.ditto.thingsearch.model.signals.commands.query.QueryThingsResponse; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrateConnectionEncryptionAbortResponse; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrateConnectionEncryptionResponse; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrateConnectionEncryptionStatusResponse; import org.eclipse.ditto.things.model.devops.commands.CreateWotValidationConfigResponse; public final class ConnectivityServiceGlobalCommandResponseRegistryTest extends GlobalCommandResponseRegistryTestCases { @@ -69,7 +72,10 @@ public ConnectivityServiceGlobalCommandResponseRegistryTest() { PurgeEntitiesResponse.class, ModifySplitBrainResolverResponse.class, CreateWotValidationConfigResponse.class, - Acknowledgement.class + Acknowledgement.class, + MigrateConnectionEncryptionResponse.class, + MigrateConnectionEncryptionStatusResponse.class, + MigrateConnectionEncryptionAbortResponse.class ); } diff --git 
a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/mqtt/hivemq/MqttClientActorIT.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/mqtt/hivemq/MqttClientActorIT.java index a053f90e853..57f0690ec7c 100644 --- a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/mqtt/hivemq/MqttClientActorIT.java +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/mqtt/hivemq/MqttClientActorIT.java @@ -343,6 +343,7 @@ public void testSingleTopicAfterReconnect() throws InterruptedException { disconnect(underTest, this); connect(underTest, this); + publishMergeThingMessage(mqttClient, TOPIC_NAME, "key", "test"); expectMergeThingMessage(commandForwarderProbe, "key", "test"); commandForwarderProbe.expectNoMessage(NO_MESSAGE_TIMEOUT); diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java new file mode 100644 index 00000000000..bcdf16cb7d5 --- /dev/null +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2024 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.security.NoSuchAlgorithmException; +import java.util.List; + +import org.eclipse.ditto.connectivity.service.util.EncryptorAesGcm; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonObject; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests for the encryption migration logic in {@link EncryptionMigrationActor}. + *

+ * Tests the core re-encryption logic (snapshot and journal field re-encryption), + * command/response serialization, and validation behavior. + */ +public final class EncryptionMigrationActorTest { + + private static String OLD_KEY; + private static String NEW_KEY; + private static String WRONG_KEY; + + private static final List POINTERS = List.of( + "/uri", + "/credentials/password" + ); + + @BeforeClass + public static void initKeys() throws NoSuchAlgorithmException { + OLD_KEY = EncryptorAesGcm.generateAESKeyAsString(); + NEW_KEY = EncryptorAesGcm.generateAESKeyAsString(); + WRONG_KEY = EncryptorAesGcm.generateAESKeyAsString(); + } + + // --- Re-encryption logic tests --- + + @Test + public void reEncryptSnapshotFieldsFromOldKeyToNewKey() { + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + encryptedWithOldKey, "", POINTERS, OLD_KEY, NEW_KEY); + + assertThat(result).isNotNull(); + final JsonObject decryptedWithNewKey = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + assertThat(decryptedWithNewKey.getValue("/credentials/password")) + .contains(plain.getValue("/credentials/password").get()); + } + + @Test + public void reEncryptJournalFieldsFromOldKeyToNewKey() { + final JsonObject plain = createPlainJournalJson(); + final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt( + plain, "connection", POINTERS, OLD_KEY); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + encryptedWithOldKey, "connection", POINTERS, OLD_KEY, NEW_KEY); + + assertThat(result).isNotNull(); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "connection", POINTERS, NEW_KEY); + assertThat(decrypted.getValue("/connection/credentials/password")) + .contains(plain.getValue("/connection/credentials/password").get()); + } + + @Test + public void 
skipAlreadyMigratedDocument() { + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject encryptedWithNewKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + encryptedWithNewKey, "", POINTERS, OLD_KEY, NEW_KEY); + + assertThat(result).isNull(); + } + + @Test + public void whenBothKeysFailTreatsAsPlaintextAndEncrypts() { + // When neither old nor new key can decrypt, the data is treated as plaintext + // and encrypted with the new key. This handles the case where data was stored + // before encryption was enabled. + final JsonObject plain = createPlainSnapshotJson(); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + plain, "", POINTERS, OLD_KEY, NEW_KEY); + + // Should encrypt the plaintext data with new key + assertThat(result).isNotNull(); + + // Verify it can be decrypted with the new key + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + assertThat(decrypted).isEqualTo(plain); + } + + @Test + public void initialEncryptionEncryptsPlaintext() { + // Initial encryption: oldKey is null, newKey is set + final JsonObject plain = createPlainSnapshotJson(); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + plain, "", POINTERS, null, NEW_KEY); + + assertThat(result).isNotNull(); + // Verify it can be decrypted with the new key + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + assertThat(decrypted).isEqualTo(plain); + } + + @Test + public void initialEncryptionSkipsAlreadyEncrypted() { + // Initial encryption: oldKey is null, data already encrypted with new key + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject alreadyEncrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + alreadyEncrypted, "", POINTERS, null, NEW_KEY); + + // 
Should skip - already encrypted + assertThat(result).isNull(); + } + + @Test + public void uriPasswordReEncryptedCorrectly() { + final JsonObject plain = JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") + .set("/credentials/password", "mypassword") + .build(); + final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); + + final String encryptedUri = encryptedWithOldKey.getValue("/uri").get().asString(); + assertThat(encryptedUri).contains("encrypted_"); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + encryptedWithOldKey, "", POINTERS, OLD_KEY, NEW_KEY); + + assertThat(result).isNotNull(); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + assertThat(decrypted.getValue("/uri").map(v -> v.asString())) + .hasValue("amqps://user:secretpassword@broker.example.com:5671"); + } + + @Test + public void plainTextFieldsNotAffected() { + final JsonObject plain = createPlainSnapshotJson(); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + plain, "", POINTERS, OLD_KEY, NEW_KEY); + + assertThat(result).isNotNull(); + final String encryptedPwd = result.getValue("/credentials/password").get().asString(); + assertThat(encryptedPwd).startsWith("encrypted_"); + } + + @Test + public void initialEncryptionSkipsUriWithAlreadyEncryptedPassword() { + // Bug 1: URI fields like amqps://user:encrypted_XXX@host were not detected + // as already encrypted because startsWith("encrypted_") checks the full URI string + final JsonObject plain = JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") + .set("/credentials/password", "mypassword") + .build(); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); + + // Verify the URI has encrypted password embedded (not a direct encrypted_ prefix) + final String encUri = 
encrypted.getValue("/uri").get().asString(); + assertThat(encUri).startsWith("amqps://"); + assertThat(encUri).contains("encrypted_"); + + // Initial encryption (oldKey=null) should detect the encrypted URI and skip + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + encrypted, "", POINTERS, null, NEW_KEY); + + assertThat(result).isNull(); + } + + @Test + public void disableWorkflowSkipsAlreadyPlaintextEntity() { + // Bug 2: decrypt() silently passes through plaintext, so disable workflow + // was counting plaintext entities as "processed" instead of "skipped" + final JsonObject plain = createPlainSnapshotJson(); + + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + plain, "", POINTERS, OLD_KEY, null); + + // Should return null (skip) because decrypt returns unchanged plaintext + assertThat(result).isNull(); + } + + @Test + public void disableWorkflowProcessesEntityWithEncryptedUriPassword() { + final JsonObject plain = JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") + .set("/credentials/password", "mypassword") + .build(); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); + + // Disable workflow (newKey=null) should decrypt and return plaintext + final JsonObject result = EncryptionMigrationActor.reEncryptFields( + encrypted, "", POINTERS, OLD_KEY, null); + + assertThat(result).isNotNull(); + assertThat(result.getValue("/uri").get().asString()) + .isEqualTo("amqps://user:secretpassword@broker.example.com:5671"); + assertThat(result.getValue("/credentials/password").get().asString()) + .isEqualTo("mypassword"); + } + + @Test + public void eventWithNoEncryptableFieldsIsSkipped() { + // Empty events or events without any of the configured pointers should be skipped + final JsonObject emptyEvent = JsonFactory.newObjectBuilder() + .set("/connection/type", "persistence-actor-internal:empty-event") + .set("/connection/effect", 
"priorityUpdate") + .build(); + + // Initial encryption + assertThat(EncryptionMigrationActor.reEncryptFields( + emptyEvent, "connection", POINTERS, null, NEW_KEY)).isNull(); + + // Key rotation + assertThat(EncryptionMigrationActor.reEncryptFields( + emptyEvent, "connection", POINTERS, OLD_KEY, NEW_KEY)).isNull(); + + // Disable workflow + assertThat(EncryptionMigrationActor.reEncryptFields( + emptyEvent, "connection", POINTERS, OLD_KEY, null)).isNull(); + } + + // --- MigrateConnectionEncryption command tests --- + + @Test + public void commandSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var command = MigrateConnectionEncryption.of(true, false, headers); + + final JsonObject json = command.toJson(); + final var deserialized = MigrateConnectionEncryption.fromJson(json, headers); + + assertThat(deserialized.isDryRun()).isTrue(); + assertThat(deserialized.isResume()).isFalse(); + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryption.TYPE); + } + + @Test + public void commandDefaultValues() { + final JsonObject minimalJson = JsonFactory.newObjectBuilder() + .set("type", MigrateConnectionEncryption.TYPE) + .build(); + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + + final var command = MigrateConnectionEncryption.fromJson(minimalJson, headers); + + assertThat(command.isDryRun()).isFalse(); + assertThat(command.isResume()).isFalse(); + } + + // --- MigrateConnectionEncryptionAbort command tests --- + + @Test + public void abortCommandSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var command = MigrateConnectionEncryptionAbort.of(headers); + + final JsonObject json = command.toJson(); + final var deserialized = MigrateConnectionEncryptionAbort.fromJson(json, headers); + + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionAbort.TYPE); + 
assertThat(deserialized.getType()).isEqualTo("connectivity.commands:migrateEncryptionAbort"); + } + + // --- MigrateConnectionEncryptionStatus command tests --- + + @Test + public void statusCommandSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var command = MigrateConnectionEncryptionStatus.of(headers); + + final JsonObject json = command.toJson(); + final var deserialized = MigrateConnectionEncryptionStatus.fromJson(json, headers); + + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionStatus.TYPE); + assertThat(deserialized.getType()).isEqualTo("connectivity.commands:migrateEncryptionStatus"); + } + + // --- Response tests --- + + @Test + public void acceptedResponseSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var response = MigrateConnectionEncryptionResponse.accepted( + false, "2026-02-16T10:00:00Z", false, headers); + + final JsonObject json = response.toJson(); + final var deserialized = MigrateConnectionEncryptionResponse.fromJson(json, headers); + + assertThat(deserialized.getPhase()).isEqualTo("started"); + assertThat(deserialized.isDryRun()).isFalse(); + assertThat(deserialized.isResumed()).isFalse(); + assertThat(deserialized.getHttpStatus()).isEqualTo(org.eclipse.ditto.base.model.common.HttpStatus.ACCEPTED); + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionResponse.TYPE); + } + + @Test + public void dryRunResponseSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var response = MigrateConnectionEncryptionResponse.dryRunCompleted( + "completed", false, "2026-02-16T10:00:00Z", + 100, 10, 2, 200, 20, 5, headers); + + final JsonObject json = response.toJson(); + final var deserialized = MigrateConnectionEncryptionResponse.fromJson(json, headers); + + assertThat(deserialized.getPhase()).isEqualTo("completed"); 
+ assertThat(deserialized.isDryRun()).isTrue(); + assertThat(deserialized.isResumed()).isFalse(); + assertThat(deserialized.getHttpStatus()).isEqualTo(org.eclipse.ditto.base.model.common.HttpStatus.OK); + assertThat(json.getValue("snapshots/processed")).contains(org.eclipse.ditto.json.JsonValue.of(100)); + assertThat(json.getValue("journalEvents/processed")).contains(org.eclipse.ditto.json.JsonValue.of(200)); + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionResponse.TYPE); + } + + @Test + public void statusResponseSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var response = MigrateConnectionEncryptionStatusResponse.of( + "in_progress:snapshots", + 150, 10, 2, + 0, 0, 0, + "507f1f77bcf86cd799439011", "connection:mqtt-prod-sensor-01", + null, null, + "2026-02-16T10:00:00Z", "2026-02-16T10:30:00Z", + true, + true, + headers); + + final JsonObject json = response.toJson(); + final var deserialized = MigrateConnectionEncryptionStatusResponse.fromJson(json, headers); + + assertThat(deserialized.getPhase()).isEqualTo("in_progress:snapshots"); + assertThat(deserialized.isMigrationActive()).isTrue(); + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionStatusResponse.TYPE); + } + + @Test + public void abortResponseSerializationRoundTrip() { + final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var response = MigrateConnectionEncryptionAbortResponse.of( + "aborted:snapshots", + 150, 10, 2, + 0, 0, 0, + "2026-02-16T10:35:00Z", + headers); + + final JsonObject json = response.toJson(); + final var deserialized = MigrateConnectionEncryptionAbortResponse.fromJson(json, headers); + + assertThat(deserialized.getPhase()).isEqualTo("aborted:snapshots"); + assertThat(deserialized.getAbortedAt()).isEqualTo("2026-02-16T10:35:00Z"); + assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionAbortResponse.TYPE); + } + + // 
--- Progress tracking tests --- + + @Test + public void migrationProgressTracking() { + final EncryptionMigrationActor.MigrationProgress progress = + new EncryptionMigrationActor.MigrationProgress(); + + progress.incrementSnapshotsProcessed() + .incrementSnapshotsProcessed() + .incrementSnapshotsSkipped() + .incrementSnapshotsFailed(); + + assertThat(progress.snapshotsProcessed).isEqualTo(2); + assertThat(progress.snapshotsSkipped).isEqualTo(1); + assertThat(progress.snapshotsFailed).isEqualTo(1); + + progress.withPhase("journal") + .incrementJournalProcessed() + .incrementJournalSkipped(); + + assertThat(progress.phase).isEqualTo("journal"); + assertThat(progress.journalProcessed).isEqualTo(1); + assertThat(progress.journalSkipped).isEqualTo(1); + } + + // --- Helper methods --- + + private static JsonObject createPlainSnapshotJson() { + return JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:password123@broker.example.com:5671") + .set("/credentials/password", "secretPassword") + .set("/name", "test-connection") + .build(); + } + + private static JsonObject createPlainJournalJson() { + return JsonFactory.newObjectBuilder() + .set("/connection/uri", "amqps://user:password123@broker.example.com:5671") + .set("/connection/credentials/password", "secretPassword") + .set("/connection/name", "test-connection") + .build(); + } + +} From ec82e0c03841855297b7da201c7080cd97510a9b Mon Sep 17 00:00:00 2001 From: Aleksandar Stanchev Date: Wed, 18 Feb 2026 14:05:27 +0200 Subject: [PATCH 3/5] Adds documentation for rolling encrypted secrets symmetric key. 
Signed-off-by: Aleksandar Stanchev --- ...nnectivity-manage-connections-piggyback.md | 201 +++++++++++++++++- .../pages/ditto/installation-operating.md | 191 ++++++++++++++++- 2 files changed, 388 insertions(+), 4 deletions(-) diff --git a/documentation/src/main/resources/pages/ditto/connectivity-manage-connections-piggyback.md b/documentation/src/main/resources/pages/ditto/connectivity-manage-connections-piggyback.md index a517ec3dd65..117f0cd7a74 100644 --- a/documentation/src/main/resources/pages/ditto/connectivity-manage-connections-piggyback.md +++ b/documentation/src/main/resources/pages/ditto/connectivity-manage-connections-piggyback.md @@ -357,7 +357,7 @@ Clears all currently stored connection logs. "headers": { "aggregate": false, "is-group-topic": false, - "ditto-sudo": true + "ditto-sudo": true }, "piggybackCommand": { "type": "connectivity.commands:resetConnectionLogs", @@ -366,6 +366,203 @@ Clears all currently stored connection logs. } ``` +## Encryption of secrets migration commands + +Since Ditto 3.9.0, the following commands are available for managing encryption key rotation: + +* [migrate encryption](#migrate-encryption) +* [migration status](#migration-status) +* [abort migration](#abort-migration) + +These commands enable safe encryption key rotation and encryption disable workflows without downtime or data loss. +For detailed information about encryption configuration and workflows, refer to +[Encrypt sensitive data in Connections](installation-operating.html#encrypt-sensitive-data-in-connections). + +### Migrate encryption + +Trigger batch processing of all persisted connection data (snapshots and journal events). 
The command supports two workflows: + +**Migration Logic:** +- **Key Rotation:** `encryption-enabled = true` + both keys set → Decrypt with old key, re-encrypt with new key +- **Disable Encryption:** `encryption-enabled = false` + old key set → Decrypt with old key, write plaintext + +The configuration determines which workflow is executed. + +**Start a new migration:** + +```json +{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": false, + "resume": false + } +} +``` + +**Dry-run migration (count affected documents without making changes):** + +```json +{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": true, + "resume": false + } +} +``` + +**Resume a previously started/aborted migration:** + +```json +{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": false, + "resume": true + } +} +``` + +If the previous migration already completed, or no previous migration exists (e.g., after a dry run which does +not persist progress), the response will be `200 OK` with `phase: "already_completed"` instead of starting a new +migration. 
+ +**Example response when starting/resuming migration:** + +```json +{ + "type": "connectivity.responses:migrateEncryption", + "status": 202, + "phase": "snapshots", + "dryRun": false, + "resumed": true, + "startedAt": "2026-02-16T10:00:00Z" +} +``` + +**Example response when there is nothing to resume (already completed or never started):** + +```json +{ + "type": "connectivity.responses:migrateEncryption", + "status": 200, + "phase": "already_completed", + "dryRun": false, + "resumed": true, + "startedAt": "2026-02-16T10:00:00Z" +} +``` + +The response indicates: +- **phase**: Starting phase of the migration +- **dryRun**: Whether this is a dry-run (no changes made) +- **resumed**: Whether migration was resumed from previous state or started fresh +- **startedAt**: When migration originally started (for resumed migrations) or now (for new migrations) + +### Migration status + +Query the current status and progress of an encryption migration. + +```json +{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryptionStatus" + } +} +``` + +**Example response:** + +```json +{ + "type": "connectivity.responses:migrateEncryptionStatus", + "status": 200, + "phase": "in_progress:snapshots", + "snapshots": { + "processed": 150, + "skipped": 10, + "failed": 2 + }, + "journalEvents": { + "processed": 0, + "skipped": 0, + "failed": 0 + }, + "progress": { + "lastProcessedSnapshotId": "507f1f77bcf86cd799439011", + "lastProcessedSnapshotPid": "connection:mqtt-prod-sensor-01", + "lastProcessedJournalId": null, + "lastProcessedJournalPid": null + }, + "timing": { + "startedAt": "2026-02-16T10:00:00Z", + "updatedAt": "2026-02-16T10:30:00Z" + }, + "migrationActive": true +} +``` + +The response includes: +- **phase**: Current migration phase (`snapshots`, `journal`, `completed`, or `in_progress:`) +- **snapshots/journalEvents**: Document counters 
(processed, skipped, failed) +- **progress**: Last processed document IDs and persistence IDs (connection IDs) for resume tracking +- **timing**: When migration started and was last updated +- **migrationActive**: Whether migration is currently running + +### Abort migration + +Abort a currently running encryption migration. The migration will stop after the current batch completes, +and progress will be saved to allow resuming later. + +```json +{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryptionAbort" + } +} +``` + +**Example response:** + +```json +{ + "type": "connectivity.responses:migrateEncryptionAbort", + "status": 200, + "phase": "aborted:snapshots", + "snapshots": { + "processed": 150, + "skipped": 10, + "failed": 2 + }, + "journalEvents": { + "processed": 0, + "skipped": 0, + "failed": 0 + }, + "abortedAt": "2026-02-16T10:35:00Z" +} +``` ## Publishing connection logs @@ -374,5 +571,5 @@ HTTP API section about managing connections. ## Payload mapping configuration -Please refer to [Payload mapping configuration](connectivity-manage-connections.html#payload-mapping-configuration) in +Please refer to [Payload mapping configuration](connectivity-manage-connections.html#payload-mapping-configuration) in HTTP API section about managing connections. 
diff --git a/documentation/src/main/resources/pages/ditto/installation-operating.md b/documentation/src/main/resources/pages/ditto/installation-operating.md index 72a4c8e626b..283c5995ca3 100644 --- a/documentation/src/main/resources/pages/ditto/installation-operating.md +++ b/documentation/src/main/resources/pages/ditto/installation-operating.md @@ -250,7 +250,7 @@ ssl-config { } ``` -### Encrypt sensitive data in Connections +### Encrypt sensitive data in Connections Since Ditto 3.1.0 there is the option to enable encryption on some connection fields before they are written to the database. @@ -308,10 +308,197 @@ Configuration can be seen at [Ditto service configuration files](#ditto-configur the [connectivity.conf](https://github.com/eclipse-ditto/ditto/blob/master/connectivity/service/src/main/resources/connectivity.conf) at "ditto.connectivity.connection.encryption" section of the config. -If at some point encryption is decided to be disabled the symmetric key is important to be kept in the +If at some point encryption is decided to be disabled the symmetric key is important to be kept in the configuration otherwise the encrypted values will not be decrypted and the only way to fix the connections will be to edit the encrypted parts and save them. +#### Encryption key rotation + +Since Ditto 3.10.0, it is possible to rotate encryption keys without downtime or data loss using a dual-key configuration +and a migration command. + +##### Dual-key configuration + +The encryption configuration supports both a current key and an optional old key for fallback decryption: + +```hocon +ditto.connectivity.connection.encryption { + encryption-enabled = true + symmetrical-key = "YOUR_NEW_KEY_HERE" # Current key for encrypting new data + old-symmetrical-key = "YOUR_OLD_KEY_HERE" # Optional fallback key for decrypting old data + json-pointers = [...] 
+} +``` + +**Behavior:** +- **Encryption:** Always uses `symmetrical-key` for encrypting new data +- **Decryption:** Tries `symmetrical-key` first, falls back to `old-symmetrical-key` if decryption fails +- **Migration:** Explicit DevOps command re-encrypts existing data from old key to new key + +**Migration Decision Logic:** + +The migration command automatically detects the intended workflow based on configuration: + +- **Encryption enabled + both keys set** → Key rotation (decrypt with old, encrypt with new) +- **Encryption enabled + only current key** → Error (nothing to migrate) +- **Encryption disabled + old key set** → Disable workflow (decrypt with old, write plaintext) +- **Encryption disabled + no keys** → Error (cannot migrate) + +##### Key rotation workflow + +To rotate an encryption key: + +1. **Generate a new encryption key** using the methods described above + +2. **Update configuration** with both keys: + ```hocon + ditto.connectivity.connection.encryption { + encryption-enabled = true + symmetrical-key = "NEW_KEY" # New key + old-symmetrical-key = "OLD_KEY" # Current key becomes old key + } + ``` + +3. **Restart connectivity service** to load the new configuration + +4. **Run dry-run migration** to verify affected documents: + ```bash + curl -X POST http://localhost:8080/devops/piggyback/connectivity \ + -u devops:devopsPw1! \ + -H 'Content-Type: application/json' \ + -d '{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": true, + "resume": false + } + }' + ``` + +5. **Start actual migration** to re-encrypt all persisted data: + ```bash + curl -X POST http://localhost:8080/devops/piggyback/connectivity \ + -u devops:devopsPw1! 
\ + -H 'Content-Type: application/json' \ + -d '{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": false, + "resume": false + } + }' + ``` + +6. **Monitor migration progress**: + ```bash + curl -X POST http://localhost:8080/devops/piggyback/connectivity \ + -u devops:devopsPw1! \ + -H 'Content-Type: application/json' \ + -d '{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryptionStatus" + } + }' + ``` + +7. **After successful migration**, remove the old key from configuration and restart the service + +**Additional migration commands:** + +- **Abort running migration:** + ```bash + curl -X POST http://localhost:8080/devops/piggyback/connectivity \ + -u devops:devopsPw1! \ + -H 'Content-Type: application/json' \ + -d '{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryptionAbort" + } + }' + ``` + +- **Resume aborted migration:** + ```bash + curl -X POST http://localhost:8080/devops/piggyback/connectivity \ + -u devops:devopsPw1! \ + -H 'Content-Type: application/json' \ + -d '{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": false, + "resume": true + } + }' + ``` + + **Note:** If the previous migration already completed, was never started, or only ran as a dry run (which does not + persist progress), the resume command returns `200 OK` with `phase: "already_completed"` instead of starting a new + migration. This makes resume safe to call idempotently. 
+ +**Migration details:** +- The migration processes both connection snapshots and journal events in MongoDB +- Progress is persisted to allow resuming after abort or service restart +- Migration runs in batches to avoid overwhelming the database +- The batch size can be configured via `ditto.connectivity.connection.encryption.migration.batch-size` +- Migration is throttled to prevent database overload (default: 100 documents/minute) +- Throttling rate can be configured via `ditto.connectivity.connection.encryption.migration.max-documents-per-minute` +- Set throttling to 0 to disable (not recommended for production) + +##### Disabling encryption + +To disable encryption while preserving access to already encrypted data: + +1. **Update configuration** with encryption disabled but old key present: + ```hocon + ditto.connectivity.connection.encryption { + encryption-enabled = false + symmetrical-key = "" # Empty - no new encryption + old-symmetrical-key = "YOUR_CURRENT_KEY" # Keep for decryption + } + ``` + +2. **Restart connectivity service** + +3. **Run migration** to decrypt all existing encrypted data: + ```bash + curl -X POST http://localhost:8080/devops/piggyback/connectivity \ + -u devops:devopsPw1! \ + -H 'Content-Type: application/json' \ + -d '{ + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": false, + "resume": false + } + }' + ``` + +4. 
**After migration completes**, remove the old key from configuration and restart + ### Rate limiting Since Ditto *2.4.0* , by default [connections](basic-connections.html) and [websockets](httpapi-protocol-bindings-websocket.html) From 3dcf373ab9ef8a1b0ec23787a08da469e5212def Mon Sep 17 00:00:00 2001 From: Aleksandar Stanchev Date: Thu, 19 Feb 2026 16:11:55 +0200 Subject: [PATCH 4/5] Some refactoring Signed-off-by: Aleksandar Stanchev --- .../service/ConnectivityRootActor.java | 21 +- .../config/DefaultFieldsEncryptionConfig.java | 9 + .../config/FieldsEncryptionConfig.java | 2 +- .../persistence/EncryptionMigrationActor.java | 893 +++++++----------- .../persistence/JsonFieldsEncryptor.java | 5 +- .../MigrateConnectionEncryption.java | 2 +- .../MigrateConnectionEncryptionAbort.java | 2 +- ...rateConnectionEncryptionAbortResponse.java | 2 +- .../MigrateConnectionEncryptionResponse.java | 2 +- .../MigrateConnectionEncryptionStatus.java | 2 +- ...ateConnectionEncryptionStatusResponse.java | 62 +- .../persistence/MigrationProgress.java | 162 ++++ .../migration/DocumentProcessingResult.java | 88 ++ .../migration/DocumentProcessor.java | 267 ++++++ .../migration/MigrationContext.java | 68 ++ .../migration/MigrationProgressTracker.java | 163 ++++ .../migration/MigrationStreamFactory.java | 195 ++++ .../src/main/resources/connectivity.conf | 19 +- .../EncryptionMigrationActorTest.java | 251 +++-- .../EncryptionMigrationDisableIT.java | 203 ++++ .../persistence/EncryptionMigrationIT.java | 198 ++++ ...ncryptionMigrationInitialEncryptionIT.java | 196 ++++ .../EncryptionMigrationTestHelper.java | 411 ++++++++ .../migration/DocumentProcessorTest.java | 258 +++++ .../templates/connectivity-deployment.yaml | 22 + deployment/helm/ditto/values.yaml | 25 + .../pages/ditto/installation-operating.md | 25 +- 27 files changed, 2828 insertions(+), 725 deletions(-) create mode 100644 
connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrationProgress.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessingResult.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessor.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationContext.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationProgressTracker.java create mode 100644 connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationStreamFactory.java create mode 100644 connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationDisableIT.java create mode 100644 connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationIT.java create mode 100644 connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationInitialEncryptionIT.java create mode 100644 connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationTestHelper.java create mode 100644 connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessorTest.java diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java index 0001d98b691..5b4a8d7f842 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java +++ 
b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/ConnectivityRootActor.java @@ -22,6 +22,8 @@ import org.apache.pekko.cluster.sharding.ClusterSharding; import org.apache.pekko.cluster.sharding.ClusterShardingSettings; import org.apache.pekko.event.DiagnosticLoggingAdapter; +import org.apache.pekko.cluster.singleton.ClusterSingletonProxy; +import org.apache.pekko.cluster.singleton.ClusterSingletonProxySettings; import org.apache.pekko.japi.pf.DeciderBuilder; import org.eclipse.ditto.base.service.RootChildActorStarter; import org.eclipse.ditto.base.service.actors.DittoRootActor; @@ -47,6 +49,7 @@ import org.eclipse.ditto.internal.utils.health.config.PersistenceConfig; import org.eclipse.ditto.internal.utils.namespaces.BlockedNamespaces; import org.eclipse.ditto.internal.utils.pekko.logging.DittoLoggerFactory; +import org.eclipse.ditto.internal.utils.persistence.mongo.MongoClientWrapper; import org.eclipse.ditto.internal.utils.persistence.mongo.MongoHealthChecker; import org.eclipse.ditto.internal.utils.persistence.mongo.streaming.MongoReadJournal; import org.eclipse.ditto.internal.utils.persistentactors.PersistencePingActor; @@ -117,8 +120,7 @@ private ConnectivityRootActor(final ConnectivityConfig connectivityConfig, ConnectionPersistenceOperationsActor.props(pubSubMediator, connectivityConfig.getMongoDbConfig(), config, connectivityConfig.getPersistenceOperationsConfig())); - startChildActor(EncryptionMigrationActor.ACTOR_NAME, - EncryptionMigrationActor.props(connectivityConfig)); + startEncryptionMigrationSingleton(actorSystem, connectivityConfig); RootChildActorStarter.get(actorSystem, ScopedConfig.dittoExtension(config)).execute(getContext()); @@ -154,6 +156,21 @@ protected PartialFunction getSupervisio }).build().orElse(super.getSupervisionDecider()); } + private void startEncryptionMigrationSingleton(final ActorSystem actorSystem, + final ConnectivityConfig connectivityConfig) { + final MongoClientWrapper mongoClientWrapper = + 
MongoClientWrapper.newInstance(connectivityConfig.getMongoDbConfig()); + final String managerName = EncryptionMigrationActor.ACTOR_NAME + "Singleton"; + final ActorRef singletonManager = startClusterSingletonActor( + EncryptionMigrationActor.props(connectivityConfig, mongoClientWrapper), managerName); + + final ClusterSingletonProxySettings proxySettings = + ClusterSingletonProxySettings.create(actorSystem).withRole(CLUSTER_ROLE); + final Props proxyProps = ClusterSingletonProxy.props( + singletonManager.path().toStringWithoutAddress(), proxySettings); + getContext().actorOf(proxyProps, EncryptionMigrationActor.ACTOR_NAME); + } + private ActorRef startClusterSingletonActor(final Props props, final String name) { return ClusterUtil.startSingleton(getContext(), CLUSTER_ROLE, name, props); } diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java index 49e3eeee8fe..16fd136c70e 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/DefaultFieldsEncryptionConfig.java @@ -59,6 +59,15 @@ private void validateConfiguration() { "Missing 'symmetrical-key'. 
It is mandatory when encryption is enabled for connections!"); } + if (migrationBatchSize <= 0) { + throw new DittoConfigError( + "'migration.batch-size' must be greater than 0, was: " + migrationBatchSize); + } + if (migrationMaxDocumentsPerMinute < 0) { + throw new DittoConfigError( + "'migration.max-documents-per-minute' must be >= 0, was: " + migrationMaxDocumentsPerMinute); + } + // If both keys are set, they must be different if (hasSymmetricalKey && hasOldKey && symmetricalKey.equals(oldSymmetricalKey)) { throw new DittoConfigError( diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java index 76baa630e93..f92ff4390e3 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/config/FieldsEncryptionConfig.java @@ -128,7 +128,7 @@ enum ConfigValue implements KnownConfigValue { * This throttles the migration stream to avoid overwhelming the database. * 0 means no throttling. 
*/ - MIGRATION_MAX_DOCUMENTS_PER_MINUTE("migration.max-documents-per-minute", 100); + MIGRATION_MAX_DOCUMENTS_PER_MINUTE("migration.max-documents-per-minute", 200); private final String configPath; private final Object defaultValue; diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java index b58005f5803..271bf41e0b8 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. 
@@ -12,13 +12,12 @@ */ package org.eclipse.ditto.connectivity.service.messaging.persistence; -import java.net.URI; import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; -import java.util.stream.Collectors; import javax.annotation.Nullable; @@ -33,34 +32,63 @@ import org.apache.pekko.stream.SharedKillSwitch; import org.apache.pekko.stream.javadsl.Sink; import org.apache.pekko.stream.javadsl.Source; -import org.bson.BsonDocument; import org.bson.Document; -import org.bson.conversions.Bson; -import org.bson.types.ObjectId; -import org.eclipse.ditto.connectivity.model.ConnectionConfigurationInvalidException; import org.eclipse.ditto.connectivity.service.config.ConnectivityConfig; import org.eclipse.ditto.connectivity.service.config.FieldsEncryptionConfig; -import org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.DocumentProcessingResult; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.DocumentProcessor; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.MigrationContext; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.MigrationProgressTracker; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.MigrationStreamFactory; +import org.eclipse.ditto.internal.utils.pekko.logging.DittoDiagnosticLoggingAdapter; +import org.eclipse.ditto.internal.utils.pekko.logging.DittoLoggerFactory; import org.eclipse.ditto.internal.utils.persistence.mongo.MongoClientWrapper; import org.eclipse.ditto.internal.utils.persistence.mongo.config.MongoDbConfig; -import org.eclipse.ditto.json.JsonObject; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.mongodb.client.model.BulkWriteOptions; -import 
com.mongodb.client.model.Filters; -import com.mongodb.client.model.ReplaceOneModel; -import com.mongodb.client.model.ReplaceOptions; -import com.mongodb.client.model.Sorts; import com.mongodb.client.model.WriteModel; import com.mongodb.reactivestreams.client.MongoCollection; /** - * Actor that performs encryption key migration for persisted connection data. + * Actor that orchestrates encryption key migration for persisted connection data. + *

+ * <p>
+ * This actor handles message routing and stream execution for encrypting, re-encrypting,
+ * or decrypting connection data stored in MongoDB. It supports three migration modes:
+ * <ul>
+ * <li>Initial encryption: Encrypt plaintext data with a new key</li>
+ * <li>Key rotation: Re-encrypt data from old key to new key</li>
+ * <li>Disable encryption: Decrypt data back to plaintext</li>
+ * </ul>
+ *
+ * <p>
+ * The actor delegates to helper classes for focused responsibilities:
+ * <ul>
+ * <li>{@link DocumentProcessor} - Document transformation and re-encryption logic</li>
+ * <li>{@link MigrationStreamFactory} - Stream construction with filters and throttling</li>
+ * <li>{@link MigrationProgressTracker} - Progress persistence for resume support</li>
+ * </ul>
+ *
+ * <p>
+ * Migration Flow:
+ * <ol>
+ * <li>Receives {@link MigrateConnectionEncryption} command</li>
+ * <li>Validates encryption configuration</li>
+ * <li>Processes snapshot collection (connection_snaps)</li>
+ * <li>Processes journal collection (connection_journal)</li>
+ * <li>Saves progress after each batch for resume capability</li>
+ * <li>Returns {@link MigrateConnectionEncryptionResponse} immediately (202 Accepted)</li>
+ * </ol>
  *
- * Reads snapshots and journal events from MongoDB, decrypts with the old key,
- * re-encrypts with the new key, and batch-updates documents using Pekko Streams.
- * Supports dry-run, resumption, and progress tracking.
+ * Features:
+ * <ul>
+ * <li>Dry-run mode for validation without modifying data</li>
+ * <li>Resume support after abort or service restart</li>
+ * <li>Throttling to limit database load</li>
+ * <li>Abort capability via {@link MigrateConnectionEncryptionAbort}</li>
+ * <li>Status queries via {@link MigrateConnectionEncryptionStatus}</li>
+ * </ul>
+ * + * @see DocumentProcessor + * @see MigrationStreamFactory + * @see MigrationProgressTracker + * @see MigrationProgress */ public final class EncryptionMigrationActor extends AbstractActor { @@ -69,7 +97,6 @@ public final class EncryptionMigrationActor extends AbstractActor { */ public static final String ACTOR_NAME = "encryptionMigration"; - private static final Logger LOG = LoggerFactory.getLogger(EncryptionMigrationActor.class); private static final String SNAPSHOT_COLLECTION = "connection_snaps"; private static final String JOURNAL_COLLECTION = "connection_journal"; @@ -78,42 +105,30 @@ public final class EncryptionMigrationActor extends AbstractActor { private static final String PHASE_SNAPSHOTS = "snapshots"; private static final String PHASE_JOURNAL = "journal"; private static final String PHASE_COMPLETED = "completed"; + private static final String PHASE_ABORTED_PREFIX = "aborted:"; - private static final String PROGRESS_ID = "current"; - - // MongoDB field names for pekko-persistence-mongodb - private static final String SNAPSHOT_SERIALIZED_FIELD = "s2"; - private static final String JOURNAL_EVENTS_FIELD = "events"; - private static final String JOURNAL_PAYLOAD_FIELD = "p"; + // MongoDB field name for document ID private static final String ID_FIELD = "_id"; - // Entity type prefix used for journal event encryption - private static final String JOURNAL_ENTITY_TYPE_PREFIX = "connection"; - // Snapshot encryption uses empty prefix - private static final String SNAPSHOT_ENTITY_TYPE_PREFIX = ""; + private final DittoDiagnosticLoggingAdapter log = DittoLoggerFactory.getDiagnosticLoggingAdapter(this); - private final MongoClientWrapper mongoClient; private final FieldsEncryptionConfig encryptionConfig; private final Materializer materializer; private final MongoCollection snapshotCollection; private final MongoCollection journalCollection; - private final MongoCollection progressCollection; + private final MigrationProgressTracker progressTracker; private 
final int batchSize; private final int maxDocumentsPerMinute; private boolean migrationInProgress = false; private boolean currentDryRun = false; private volatile boolean abortRequested = false; - @Nullable private SharedKillSwitch activeKillSwitch; @Nullable - private volatile MigrationProgress currentProgress; + private MigrationProgress currentProgress; - @SuppressWarnings("unused") - private EncryptionMigrationActor(final ConnectivityConfig connectivityConfig) { - final MongoDbConfig mongoDbConfig = connectivityConfig.getMongoDbConfig(); + private EncryptionMigrationActor(final ConnectivityConfig connectivityConfig, final MongoClientWrapper mongoClient) { this.encryptionConfig = connectivityConfig.getConnectionConfig().getFieldsEncryptionConfig(); - this.mongoClient = MongoClientWrapper.newInstance(mongoDbConfig); this.materializer = Materializer.createMaterializer(this::getContext); this.batchSize = encryptionConfig.getMigrationBatchSize(); this.maxDocumentsPerMinute = encryptionConfig.getMigrationMaxDocumentsPerMinute(); @@ -121,7 +136,8 @@ private EncryptionMigrationActor(final ConnectivityConfig connectivityConfig) { final var db = mongoClient.getDefaultDatabase(); this.snapshotCollection = db.getCollection(SNAPSHOT_COLLECTION); this.journalCollection = db.getCollection(JOURNAL_COLLECTION); - this.progressCollection = db.getCollection(PROGRESS_COLLECTION); + final MongoCollection progressCollection = db.getCollection(PROGRESS_COLLECTION); + this.progressTracker = new MigrationProgressTracker(progressCollection, materializer); } /** @@ -130,14 +146,8 @@ private EncryptionMigrationActor(final ConnectivityConfig connectivityConfig) { * @param connectivityConfig the connectivity configuration. * @return the Props. 
*/ - public static Props props(final ConnectivityConfig connectivityConfig) { - return Props.create(EncryptionMigrationActor.class, connectivityConfig); - } - - @Override - public void postStop() throws Exception { - mongoClient.close(); - super.postStop(); + public static Props props(final ConnectivityConfig connectivityConfig, final MongoClientWrapper mongoClient) { + return Props.create(EncryptionMigrationActor.class, connectivityConfig, mongoClient); } @Override @@ -146,44 +156,81 @@ public Receive createReceive() { .match(MigrateConnectionEncryption.class, this::handleMigration) .match(MigrateConnectionEncryptionAbort.class, this::handleAbort) .match(MigrateConnectionEncryptionStatus.class, this::handleStatus) + .match(ProgressUpdate.class, this::handleProgressUpdate) + .match(MigrationCompleted.class, this::handleMigrationCompleted) .build(); } + private void handleProgressUpdate(final ProgressUpdate update) { + this.currentProgress = update.progress; + } + + private void handleMigrationCompleted(final MigrationCompleted completed) { + migrationInProgress = false; + activeKillSwitch = null; + final boolean wasAborted = abortRequested; + abortRequested = false; + final MigrationProgress progress = completed.progress; + final boolean dryRun = completed.dryRun; + + if (completed.error != null && !wasAborted) { + log.error("Encryption migration failed", completed.error); + } else { + final String finalPhase = wasAborted + ? "aborted:" + (progress != null ? progress.phase() : "unknown") + : (progress != null ? progress.phase() : "unknown"); + log.info("Encryption migration {} (dryRun={}): snapshots(p={}/s={}/f={}), " + + "journal(p={}/s={}/f={}), finalPhase={}", + wasAborted ? "aborted" : "completed", dryRun, + progress != null ? progress.snapshotsProcessed() : 0, + progress != null ? progress.snapshotsSkipped() : 0, + progress != null ? progress.snapshotsFailed() : 0, + progress != null ? progress.journalProcessed() : 0, + progress != null ? 
progress.journalSkipped() : 0, + progress != null ? progress.journalFailed() : 0, + finalPhase); + if (progress != null) { + currentProgress = progress.withPhase(finalPhase); + if (wasAborted && !dryRun) { + progressTracker.saveProgressWithRetry(progress.withPhase(finalPhase), 2) + .whenComplete((v, saveErr) -> { + if (saveErr != null) { + log.error("Failed to save abort progress after retries: {}", + saveErr.getMessage()); + } else { + log.info("Abort progress saved successfully: phase={}", + finalPhase); + } + }); + } + } + } + } + private void handleStatus(final MigrateConnectionEncryptionStatus command) { final ActorRef sender = getSender(); final MigrationProgress inMemory = currentProgress; if (inMemory != null) { // Use in-memory progress (available during and after migration, including dry-run) final String phase = migrationInProgress - ? "in_progress:" + inMemory.phase - : inMemory.phase; + ? "in_progress:" + inMemory.phase() + : inMemory.phase(); sender.tell(MigrateConnectionEncryptionStatusResponse.of( - phase, - inMemory.snapshotsProcessed, inMemory.snapshotsSkipped, inMemory.snapshotsFailed, - inMemory.journalProcessed, inMemory.journalSkipped, inMemory.journalFailed, - inMemory.lastProcessedSnapshotId, inMemory.lastProcessedSnapshotPid, - inMemory.lastProcessedJournalId, inMemory.lastProcessedJournalPid, - inMemory.startedAt, Instant.now().toString(), - currentDryRun, - migrationInProgress, + phase, inMemory, currentDryRun, migrationInProgress, command.getDittoHeaders()), getSelf()); } else { // Fall back to MongoDB (e.g. 
after service restart) - loadProgress().whenComplete((optProgress, error) -> { + final ActorRef self = getSelf(); + progressTracker.loadProgress().thenApply(optProgress -> { + final MigrationProgress progress = optProgress.orElseGet(MigrationProgress::new); + return MigrateConnectionEncryptionStatusResponse.of( + progress.phase(), progress, false, false, + command.getDittoHeaders()); + }).whenComplete((response, error) -> { if (error != null) { - sender.tell(new Status.Failure(error), getSelf()); + sender.tell(new Status.Failure(error), self); } else { - final MigrationProgress progress = optProgress.orElseGet(MigrationProgress::new); - sender.tell(MigrateConnectionEncryptionStatusResponse.of( - progress.phase, - progress.snapshotsProcessed, progress.snapshotsSkipped, progress.snapshotsFailed, - progress.journalProcessed, progress.journalSkipped, progress.journalFailed, - progress.lastProcessedSnapshotId, progress.lastProcessedSnapshotPid, - progress.lastProcessedJournalId, progress.lastProcessedJournalPid, - progress.startedAt, Instant.now().toString(), - false, - false, - command.getDittoHeaders()), getSelf()); + sender.tell(response, self); } }); } @@ -198,7 +245,7 @@ private void handleAbort(final MigrateConnectionEncryptionAbort command) { return; } - LOG.info("Abort requested for running migration"); + log.info("Abort requested for running migration"); abortRequested = true; if (activeKillSwitch != null) { activeKillSwitch.shutdown(); @@ -206,9 +253,9 @@ private void handleAbort(final MigrateConnectionEncryptionAbort command) { final MigrationProgress progress = currentProgress != null ? 
currentProgress : new MigrationProgress(); sender.tell(MigrateConnectionEncryptionAbortResponse.of( - "aborted:" + progress.phase, - progress.snapshotsProcessed, progress.snapshotsSkipped, progress.snapshotsFailed, - progress.journalProcessed, progress.journalSkipped, progress.journalFailed, + "aborted:" + progress.phase(), + progress.snapshotsProcessed(), progress.snapshotsSkipped(), progress.snapshotsFailed(), + progress.journalProcessed(), progress.journalSkipped(), progress.journalFailed(), Instant.now().toString(), command.getDittoHeaders()), getSelf()); } @@ -267,610 +314,346 @@ private void handleMigration(final MigrateConnectionEncryption command) { currentProgress = null; activeKillSwitch = KillSwitches.shared("encryption-migration"); final String mode = isDisableWorkflow ? "disable" : isInitialEncryption ? "initial-encryption" : "key-rotation"; - LOG.info("Starting encryption migration (mode={}, dryRun={}, resume={})", mode, dryRun, resume); + log.info("Starting encryption migration (mode={}, dryRun={}, resume={})", mode, dryRun, resume); final CompletionStage migrationResult; if (resume) { - migrationResult = loadProgress().thenCompose(optProgress -> { - if (optProgress.isEmpty() || PHASE_COMPLETED.equals(optProgress.get().phase)) { + migrationResult = progressTracker.loadProgress().thenCompose(optProgress -> { + if (optProgress.isEmpty() || PHASE_COMPLETED.equals(optProgress.get().phase())) { // No previous migration exists or it already completed — nothing to resume final String reason = optProgress.isEmpty() ? 
"no previous migration found" : "previous migration already completed"; - LOG.info("Resume requested but {}, nothing to do", reason); + log.info("Resume requested but {}, nothing to do", reason); migrationInProgress = false; final MigrationProgress completed = optProgress.orElseGet(MigrationProgress::new) .withPhase(PHASE_COMPLETED); currentProgress = completed; sender.tell(MigrateConnectionEncryptionResponse.alreadyCompleted( Instant.now().toString(), command.getDittoHeaders()), getSelf()); - return java.util.concurrent.CompletableFuture.completedFuture(completed); + return CompletableFuture.completedFuture(completed); } final MigrationProgress progress = optProgress.get(); + log.info("Resuming migration from saved progress: phase={}, lastSnapshotId={}, " + + "lastJournalId={}", progress.phase(), + progress.lastProcessedSnapshotId(), progress.lastProcessedJournalId()); sender.tell(MigrateConnectionEncryptionResponse.accepted( true, Instant.now().toString(), dryRun, command.getDittoHeaders()), getSelf()); return runMigration(progress, oldKey, newKey, pointers, dryRun); }); } else { - migrationResult = deleteProgress().thenCompose(v -> + migrationResult = progressTracker.deleteProgress().thenCompose(v -> runMigration(new MigrationProgress(), oldKey, newKey, pointers, dryRun)); // Reply immediately with 202 Accepted sender.tell(MigrateConnectionEncryptionResponse.accepted( false, Instant.now().toString(), dryRun, command.getDittoHeaders()), getSelf()); } - migrationResult.whenComplete((progress, error) -> { - migrationInProgress = false; - activeKillSwitch = null; - final boolean wasAborted = abortRequested; - abortRequested = false; - if (error != null && !wasAborted) { - LOG.error("Encryption migration failed", error); - } else { - final String finalPhase = wasAborted - ? "aborted:" + (progress != null ? progress.phase : "unknown") - : progress.phase; - LOG.info("Encryption migration {} (dryRun={}): {}", - wasAborted ? 
"aborted" : "completed", dryRun, progress); - if (progress != null) { - currentProgress = progress.withPhase(finalPhase); - if (wasAborted && !dryRun) { - saveProgress(progress.withPhase("aborted")).toCompletableFuture().join(); - } - } - } - }); + final ActorRef self = getSelf(); + migrationResult.whenComplete((progress, error) -> + self.tell(new MigrationCompleted(progress, error, dryRun), ActorRef.noSender())); } + /** + * Runs the encryption migration process. + *

+ * Processes both snapshot and journal collections in sequence. Handles resume logic + * by checking the initial progress phase and skipping completed phases. + * + * @param initialProgress the starting progress (from resume or fresh start) + * @param oldKey the old encryption key (null for initial encryption) + * @param newKey the new encryption key (null for disable workflow) + * @param pointers the JSON pointers to encrypt/decrypt + * @param dryRun if true, validate without writing changes + * @return completion stage with final migration progress + */ private CompletionStage runMigration(final MigrationProgress initialProgress, final String oldKey, final String newKey, final List pointers, final boolean dryRun) { + log.info("runMigration: loaded progress phase={}, lastSnapshotId={}, lastJournalId={}, " + + "snapshots(p={}/s={}/f={}), journal(p={}/s={}/f={})", + initialProgress.phase(), initialProgress.lastProcessedSnapshotId(), + initialProgress.lastProcessedJournalId(), + initialProgress.snapshotsProcessed(), initialProgress.snapshotsSkipped(), + initialProgress.snapshotsFailed(), + initialProgress.journalProcessed(), initialProgress.journalSkipped(), + initialProgress.journalFailed()); + + // Strip "aborted:" prefix to get the effective phase for resume + final String effectivePhase; + if (initialProgress.phase().startsWith(PHASE_ABORTED_PREFIX)) { + effectivePhase = initialProgress.phase().substring(PHASE_ABORTED_PREFIX.length()); + log.info("Stripped aborted prefix: effective phase={}", effectivePhase); + } else { + effectivePhase = initialProgress.phase(); + } + final CompletionStage afterSnapshots; - if (PHASE_JOURNAL.equals(initialProgress.phase)) { - // Resume from journal phase — snapshots already done - afterSnapshots = java.util.concurrent.CompletableFuture.completedFuture(initialProgress); - } else if (PHASE_COMPLETED.equals(initialProgress.phase)) { - return java.util.concurrent.CompletableFuture.completedFuture(initialProgress); + if 
(PHASE_JOURNAL.equals(effectivePhase) || initialProgress.lastProcessedJournalId() != null) { + // Resume from journal phase — snapshots already done. + // Belt-and-suspenders: also skip snapshots if journal progress exists (handles legacy "aborted" format) + if (initialProgress.lastProcessedJournalId() != null && !PHASE_JOURNAL.equals(effectivePhase)) { + log.info("Skipping snapshots: lastProcessedJournalId is set (legacy aborted format)"); + } + afterSnapshots = CompletableFuture.completedFuture(initialProgress); + } else if (PHASE_COMPLETED.equals(effectivePhase)) { + return CompletableFuture.completedFuture(initialProgress); } else { afterSnapshots = migrateSnapshots(initialProgress, oldKey, newKey, pointers, dryRun); } return afterSnapshots.thenCompose(progress -> { if (abortRequested) { - return java.util.concurrent.CompletableFuture.completedFuture(progress); + return CompletableFuture.completedFuture(progress); } final MigrationProgress journalProgress = progress.withPhase(PHASE_JOURNAL); return migrateJournal(journalProgress, oldKey, newKey, pointers, dryRun); }).thenCompose(progress -> { if (abortRequested) { - return java.util.concurrent.CompletableFuture.completedFuture(progress); + return CompletableFuture.completedFuture(progress); } final MigrationProgress completed = progress.withPhase(PHASE_COMPLETED); if (!dryRun) { - return saveProgress(completed).thenApply(v -> completed); + return progressTracker.saveProgress(completed).thenApply(v -> completed); } - return java.util.concurrent.CompletableFuture.completedFuture(completed); + return CompletableFuture.completedFuture(completed); }); } + /** + * Migrates the snapshot collection (connection_snaps). + *

+ * Builds a stream from {@link MigrationStreamFactory}, applies the kill switch for abort + * support, groups documents into batches, and processes each batch via + * {@link #processSnapshotBatch}. + * + * @param progress the current migration progress + * @param oldKey the old encryption key + * @param newKey the new encryption key + * @param pointers the JSON pointers to encrypt/decrypt + * @param dryRun if true, validate without writing changes + * @return completion stage with updated progress after snapshot migration + */ private CompletionStage migrateSnapshots(final MigrationProgress progress, final String oldKey, final String newKey, final List pointers, final boolean dryRun) { - LOG.info("Starting snapshot migration (dryRun={}, throttling={} docs/min)", dryRun, + log.info("Starting snapshot migration (dryRun={}, throttling={} docs/min)", dryRun, maxDocumentsPerMinute > 0 ? maxDocumentsPerMinute : "disabled"); - final Bson resumeFilter = progress.lastProcessedSnapshotId != null - ? 
Filters.gt(ID_FIELD, progress.lastProcessedSnapshotId) - : Filters.empty(); - final Bson encryptableFieldsFilter = buildEncryptableFieldsFilter( - SNAPSHOT_SERIALIZED_FIELD, SNAPSHOT_ENTITY_TYPE_PREFIX, pointers); - final Bson filter = Filters.and(resumeFilter, encryptableFieldsFilter); - final Source source = Source.fromPublisher( - snapshotCollection.find(filter) - .sort(Sorts.ascending(ID_FIELD)) - .batchSize(batchSize)); + final Source source = MigrationStreamFactory.buildSnapshotStream( + snapshotCollection, progress, pointers, batchSize, maxDocumentsPerMinute); - final Source throttledSource = applyThrottling(source, maxDocumentsPerMinute); - - return throttledSource + return source .via(activeKillSwitch.flow()) .grouped(batchSize) - .runWith(Sink.fold(progress, (currentProgress, batch) -> + .runWith(Sink.foldAsync(progress, (currentProgress, batch) -> processSnapshotBatch(currentProgress, batch, oldKey, newKey, pointers, dryRun)), materializer) .thenApply(finalProgress -> { - LOG.info("Snapshot migration {}: processed={}, skipped={}, failed={}", + log.info("Snapshot migration {}: processed={}, skipped={}, failed={}", abortRequested ? "aborted" : "done", - finalProgress.snapshotsProcessed, finalProgress.snapshotsSkipped, - finalProgress.snapshotsFailed); + finalProgress.snapshotsProcessed(), finalProgress.snapshotsSkipped(), + finalProgress.snapshotsFailed()); return finalProgress; }); } - private MigrationProgress processSnapshotBatch(final MigrationProgress progress, + /** + * Processes a batch of snapshot documents. + *

+ * Iterates through each document, delegates transformation to {@link DocumentProcessor}, + * updates progress counters, collects write models, and delegates batch write to + * {@link #executeBatchWriteAndSaveProgress}. + * + * @param progress the current progress + * @param batch the batch of documents to process + * @param oldKey the old encryption key + * @param newKey the new encryption key + * @param pointers the JSON pointers to encrypt/decrypt + * @param dryRun if true, skip database writes + * @return completion stage with updated progress + */ + private CompletionStage processSnapshotBatch(final MigrationProgress progress, final List batch, final String oldKey, final String newKey, final List pointers, final boolean dryRun) { + log.debug("processSnapshotBatch: batchSize={}, firstId={}, lastId={}", + batch.size(), + batch.isEmpty() ? "N/A" : batch.get(0).get(ID_FIELD), + batch.isEmpty() ? "N/A" : batch.get(batch.size() - 1).get(ID_FIELD)); MigrationProgress currentProgress = progress; final List> writeModels = new ArrayList<>(); for (final Document doc : batch) { final String docId = doc.get(ID_FIELD).toString(); final String pid = doc.getString("pid"); - try { - final Document s2 = doc.get(SNAPSHOT_SERIALIZED_FIELD, Document.class); - if (s2 == null) { - currentProgress = currentProgress.incrementSnapshotsSkipped(); - // Update last processed ID and PID even when skipped - currentProgress = currentProgress.withLastSnapshotId(docId); - currentProgress = currentProgress.withLastSnapshotPid(pid); - continue; - } - - final BsonDocument bsonDoc = s2.toBsonDocument(Document.class, - com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); - final JsonObject jsonObject = DittoBsonJson.getInstance().serialize(bsonDoc); - - final JsonObject reEncrypted = reEncryptFields(jsonObject, SNAPSHOT_ENTITY_TYPE_PREFIX, - pointers, oldKey, newKey); - - if (reEncrypted == null) { - // Already encrypted with new key - currentProgress = 
currentProgress.incrementSnapshotsSkipped(); - } else { - if (!dryRun) { - final BsonDocument newBson = DittoBsonJson.getInstance().parse(reEncrypted); - doc.put(SNAPSHOT_SERIALIZED_FIELD, Document.parse(newBson.toJson())); - writeModels.add(new ReplaceOneModel<>( - Filters.eq(ID_FIELD, doc.get(ID_FIELD)), - doc)); - } - currentProgress = currentProgress.incrementSnapshotsProcessed(); - } - } catch (final Exception e) { - LOG.warn("Failed to process snapshot {} (pid={}): {}", docId, pid, e.getMessage()); - currentProgress = currentProgress.incrementSnapshotsFailed(); + final DocumentProcessingResult result = processSnapshotDocument(doc, oldKey, newKey, pointers, dryRun); + switch (result.outcome()) { + case PROCESSED -> currentProgress = currentProgress.incrementSnapshotsProcessed(); + case SKIPPED -> currentProgress = currentProgress.incrementSnapshotsSkipped(); + case FAILED -> currentProgress = currentProgress.incrementSnapshotsFailed(); } - // Update last processed ID and PID for EVERY document - currentProgress = currentProgress.withLastSnapshotId(docId); - currentProgress = currentProgress.withLastSnapshotPid(pid); - } - - // Perform bulk write if there are changes - if (!dryRun && !writeModels.isEmpty()) { - try { - Source.fromPublisher(snapshotCollection.bulkWrite(writeModels, - new BulkWriteOptions().ordered(false))) - .runWith(Sink.head(), materializer) - .toCompletableFuture().join(); - } catch (final Exception e) { - LOG.error("Bulk write failed for snapshot batch: {}", e.getMessage()); - // Continue to save progress even if bulk write fails + if (result.writeModel() != null) { + writeModels.add(result.writeModel()); } + currentProgress = currentProgress + .withLastSnapshotId(docId) + .withLastSnapshotPid(pid); } - // Save progress to MongoDB for non-dry-run; always update in-memory for status queries - if (!dryRun) { - final MigrationProgress progressToSave = currentProgress.withPhase(PHASE_SNAPSHOTS); - try { - 
saveProgress(progressToSave).toCompletableFuture().join(); - } catch (final Exception e) { - LOG.error("Failed to save progress after snapshot batch: {}", e.getMessage()); - } - } - this.currentProgress = currentProgress; + return executeBatchWriteAndSaveProgress( + currentProgress, writeModels, snapshotCollection, PHASE_SNAPSHOTS, dryRun, true); + } - return currentProgress; + private DocumentProcessingResult processSnapshotDocument(final Document doc, final String oldKey, + final String newKey, final List pointers, final boolean dryRun) { + final MigrationContext context = MigrationContext.forSnapshots(oldKey, newKey, pointers); + return DocumentProcessor.processSnapshotDocument(doc, context, dryRun); } + /** + * Migrates the journal collection (connection_journal). + *

+ * Similar to {@link #migrateSnapshots} but processes journal event documents which + * contain arrays of events with payloads to re-encrypt. + * + * @param progress the current migration progress + * @param oldKey the old encryption key + * @param newKey the new encryption key + * @param pointers the JSON pointers to encrypt/decrypt + * @param dryRun if true, validate without writing changes + * @return completion stage with updated progress after journal migration + */ private CompletionStage migrateJournal(final MigrationProgress progress, final String oldKey, final String newKey, final List pointers, final boolean dryRun) { - LOG.info("Starting journal migration (dryRun={}, throttling={} docs/min)", dryRun, + log.info("Starting journal migration (dryRun={}, throttling={} docs/min)", dryRun, maxDocumentsPerMinute > 0 ? maxDocumentsPerMinute : "disabled"); - final Bson resumeFilter = progress.lastProcessedJournalId != null - ? Filters.gt(ID_FIELD, new ObjectId(progress.lastProcessedJournalId)) - : Filters.empty(); - final Bson encryptableFieldsFilter = buildEncryptableFieldsFilter( - JOURNAL_EVENTS_FIELD + "." 
+ JOURNAL_PAYLOAD_FIELD, - JOURNAL_ENTITY_TYPE_PREFIX, pointers); - final Bson filter = Filters.and(resumeFilter, encryptableFieldsFilter); - - final Source source = Source.fromPublisher( - journalCollection.find(filter) - .sort(Sorts.ascending(ID_FIELD)) - .batchSize(batchSize)); - - final Source throttledSource = applyThrottling(source, maxDocumentsPerMinute); - - return throttledSource + + final Source source = MigrationStreamFactory.buildJournalStream( + journalCollection, progress, pointers, batchSize, maxDocumentsPerMinute); + + return source .via(activeKillSwitch.flow()) .grouped(batchSize) - .runWith(Sink.fold(progress, (currentProgress, batch) -> + .runWith(Sink.foldAsync(progress, (currentProgress, batch) -> processJournalBatch(currentProgress, batch, oldKey, newKey, pointers, dryRun)), materializer) .thenApply(finalProgress -> { - LOG.info("Journal migration {}: processed={}, skipped={}, failed={}", + log.info("Journal migration {}: processed={}, skipped={}, failed={}", abortRequested ? "aborted" : "done", - finalProgress.journalProcessed, finalProgress.journalSkipped, - finalProgress.journalFailed); + finalProgress.journalProcessed(), finalProgress.journalSkipped(), + finalProgress.journalFailed()); return finalProgress; }); } - private MigrationProgress processJournalBatch(final MigrationProgress progress, + private CompletionStage processJournalBatch(final MigrationProgress progress, final List batch, final String oldKey, final String newKey, final List pointers, final boolean dryRun) { + log.debug("processJournalBatch: batchSize={}, firstId={}, lastId={}", + batch.size(), + batch.isEmpty() ? "N/A" : batch.getFirst().get(ID_FIELD), + batch.isEmpty() ? 
"N/A" : batch.getLast().get(ID_FIELD)); MigrationProgress currentProgress = progress; final List> writeModels = new ArrayList<>(); for (final Document doc : batch) { - final Object docId = doc.get(ID_FIELD); - final String docIdStr = docId.toString(); + final String docIdStr = doc.get(ID_FIELD).toString(); final String pid = doc.getString("pid"); - try { - final List events = doc.getList(JOURNAL_EVENTS_FIELD, Document.class); - if (events == null || events.isEmpty()) { - currentProgress = currentProgress.incrementJournalSkipped(); - // Update last processed ID and PID even when skipped - currentProgress = currentProgress.withLastJournalId(docIdStr); - currentProgress = currentProgress.withLastJournalPid(pid); - continue; - } - - boolean anyChanged = false; - final List updatedEvents = new ArrayList<>(events.size()); - - for (final Document event : events) { - final Document payload = event.get(JOURNAL_PAYLOAD_FIELD, Document.class); - if (payload == null) { - updatedEvents.add(event); - continue; - } - - final BsonDocument bsonPayload = payload.toBsonDocument(Document.class, - com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); - final JsonObject jsonPayload = DittoBsonJson.getInstance().serialize(bsonPayload); - - final JsonObject reEncrypted = reEncryptFields(jsonPayload, JOURNAL_ENTITY_TYPE_PREFIX, - pointers, oldKey, newKey); - - if (reEncrypted != null) { - if (!dryRun) { - final BsonDocument newBson = DittoBsonJson.getInstance().parse(reEncrypted); - event.put(JOURNAL_PAYLOAD_FIELD, Document.parse(newBson.toJson())); - } - anyChanged = true; - } - updatedEvents.add(event); - } - - if (anyChanged) { - if (!dryRun) { - doc.put(JOURNAL_EVENTS_FIELD, updatedEvents); - writeModels.add(new ReplaceOneModel<>( - Filters.eq(ID_FIELD, docId), - doc)); - } - currentProgress = currentProgress.incrementJournalProcessed(); - } else { - currentProgress = currentProgress.incrementJournalSkipped(); - } - } catch (final Exception e) { - LOG.warn("Failed to process 
journal document {} (pid={}): {}", docIdStr, pid, e.getMessage()); - currentProgress = currentProgress.incrementJournalFailed(); + final DocumentProcessingResult result = processJournalDocument(doc, oldKey, newKey, pointers, dryRun); + switch (result.outcome()) { + case PROCESSED -> currentProgress = currentProgress.incrementJournalProcessed(); + case SKIPPED -> currentProgress = currentProgress.incrementJournalSkipped(); + case FAILED -> currentProgress = currentProgress.incrementJournalFailed(); + } + if (result.writeModel() != null) { + writeModels.add(result.writeModel()); } - // Update last processed ID and PID for EVERY document currentProgress = currentProgress.withLastJournalId(docIdStr); currentProgress = currentProgress.withLastJournalPid(pid); } - // Perform bulk write if there are changes - if (!dryRun && !writeModels.isEmpty()) { - try { - Source.fromPublisher(journalCollection.bulkWrite(writeModels, - new BulkWriteOptions().ordered(false))) - .runWith(Sink.head(), materializer) - .toCompletableFuture().join(); - } catch (final Exception e) { - LOG.error("Bulk write failed for journal batch: {}", e.getMessage()); - // Continue to save progress even if bulk write fails - } - } - - // Save progress to MongoDB for non-dry-run; always update in-memory for status queries - if (!dryRun) { - final MigrationProgress progressToSave = currentProgress.withPhase(PHASE_JOURNAL); - try { - saveProgress(progressToSave).toCompletableFuture().join(); - } catch (final Exception e) { - LOG.error("Failed to save progress after journal batch: {}", e.getMessage()); - } - } - this.currentProgress = currentProgress; - - return currentProgress; + return executeBatchWriteAndSaveProgress( + currentProgress, writeModels, journalCollection, PHASE_JOURNAL, dryRun, false); } - /** - * Applies throttling to the source stream if throttling is enabled (maxDocsPerMinute > 0). - * Throttling is implemented using Pekko Streams throttle operator. 
- * - * @param source the source stream - * @param maxDocsPerMinute maximum documents per minute, 0 means no throttling - * @return throttled source if enabled, original source otherwise - */ - private Source applyThrottling(final Source source, - final int maxDocsPerMinute) { - if (maxDocsPerMinute <= 0) { - return source; - } - - // Throttle directly using the configured docs/minute rate. - // Pekko Streams throttle uses a token-bucket algorithm, so bursts up to maxDocsPerMinute - // are allowed as long as the average rate stays within the limit. - return source.throttle(maxDocsPerMinute, java.time.Duration.ofMinutes(1)); + private DocumentProcessingResult processJournalDocument(final Document doc, final String oldKey, + final String newKey, final List pointers, final boolean dryRun) { + final MigrationContext context = MigrationContext.forJournal(oldKey, newKey, pointers); + return DocumentProcessor.processJournalDocument(doc, context, dryRun); } /** - * Re-encrypts fields in a JSON object based on the migration mode. + * Executes a batch write to MongoDB and saves migration progress. + *

+ * Coordinates the batch write operation, handles failures by adjusting progress counters, + * saves progress via {@link MigrationProgressTracker}, and sends progress updates to + * the actor for in-memory tracking. * - *

Supports three modes: - *

    - *
  • Initial encryption ({@code oldKey == null}): encrypt plaintext with newKey
  • - *
  • Key rotation (both keys set): decrypt with oldKey, encrypt with newKey
  • - *
  • Disable encryption ({@code newKey == null}): decrypt with oldKey, write plaintext
  • - *
- * - * @param oldKey the old encryption key, or {@code null} for initial encryption (plaintext data) - * @param newKey the new encryption key, or {@code null} to disable encryption (write plaintext) - * @return the transformed JSON object, or {@code null} if already in the desired state (skip). + * @param progress the current progress + * @param writeModels the MongoDB write models for bulk update + * @param collection the MongoDB collection to write to + * @param phase the current migration phase (for logging and progress tracking) + * @param dryRun if true, skip database writes + * @param isSnapshot true for snapshot collection, false for journal collection + * @return completion stage with progress after write and save */ - static JsonObject reEncryptFields(final JsonObject jsonObject, final String entityTypePrefix, - final List pointers, @Nullable final String oldKey, @Nullable final String newKey) { - - if (oldKey == null && newKey != null) { - // Initial encryption: data is plaintext, encrypt with new key - // Check if any field already has the encrypted_ prefix — if so, skip - final boolean alreadyEncrypted = pointers.stream() - .map(p -> entityTypePrefix + p) - .map(org.eclipse.ditto.json.JsonPointer::of) - .flatMap(pointer -> jsonObject.getValue(pointer).stream()) - .filter(org.eclipse.ditto.json.JsonValue::isString) - .anyMatch(v -> containsEncryptedValue(v.asString())); - if (alreadyEncrypted) { - return null; - } - final JsonObject encrypted = JsonFieldsEncryptor.encrypt(jsonObject, entityTypePrefix, pointers, newKey); - // Skip if encrypt produced no changes (e.g. no matching pointers in this entity) - return encrypted.equals(jsonObject) ? 
null : encrypted; + private CompletionStage executeBatchWriteAndSaveProgress( + final MigrationProgress progress, + final List> writeModels, + final MongoCollection collection, + final String phase, + final boolean dryRun, + final boolean isSnapshot) { + + final int batchWriteCount = writeModels.size(); + log.debug("executeBatchWrite: phase={}, writeModels={}, dryRun={}", phase, batchWriteCount, dryRun); + final CompletionStage writeStage; + if (!dryRun && !writeModels.isEmpty()) { + writeStage = Source.fromPublisher(collection.bulkWrite(writeModels, + new BulkWriteOptions().ordered(false))) + .runWith(Sink.head(), materializer) + .thenApply(r -> { + log.debug("Bulk write completed for {} batch: {} documents written", + phase, r.getModifiedCount() + r.getInsertedCount()); + return progress; + }) + .exceptionally(e -> { + log.error("Bulk write failed for {} batch ({} documents): {}", + phase, batchWriteCount, e.getMessage()); + return progress.adjustForBulkWriteFailure(batchWriteCount, isSnapshot); + }); + } else { + writeStage = CompletableFuture.completedFuture(progress); } - // Key rotation or disable workflow — oldKey must be set - // Try decrypting with the old key - try { - final JsonObject decrypted = JsonFieldsEncryptor.decrypt(jsonObject, entityTypePrefix, - pointers, oldKey); - - if (newKey == null) { - // Disable workflow: return decrypted plaintext, but skip if nothing changed - // (decrypt silently passes through plaintext values, so unchanged means already plain) - return decrypted.equals(jsonObject) ? null : decrypted; - } else { - // Key rotation: re-encrypt with new key - final JsonObject reEncrypted = JsonFieldsEncryptor.encrypt(decrypted, entityTypePrefix, pointers, newKey); - // Skip if the result is identical (e.g. no matching pointers in this entity) - return reEncrypted.equals(jsonObject) ? 
null : reEncrypted; - } - } catch (final ConnectionConfigurationInvalidException e) { - // Old key failed — try new key to see if already migrated - // (Only applicable for key rotation, not disable workflow) - if (newKey == null) { - // Disable workflow: if old key fails, data is already plaintext - skip - return null; - } - - try { - JsonFieldsEncryptor.decrypt(jsonObject, entityTypePrefix, pointers, newKey); - // Already encrypted with new key — skip - return null; - } catch (final ConnectionConfigurationInvalidException e2) { - // Both keys failed — data might be plaintext, try encrypting directly - final JsonObject encrypted = JsonFieldsEncryptor.encrypt(jsonObject, entityTypePrefix, pointers, newKey); - return encrypted.equals(jsonObject) ? null : encrypted; + final ActorRef self = getSelf(); + return writeStage.thenCompose(progressAfterWrite -> { + if (!dryRun) { + final MigrationProgress progressToSave = progressAfterWrite.withPhase(phase); + return progressTracker.saveProgress(progressToSave) + .thenApply(_ -> progressAfterWrite) + .exceptionally(e -> { + log.error("Failed to save progress after {} batch: {}", phase, e.getMessage()); + return progressAfterWrite; + }); } - } + return CompletableFuture.completedFuture(progressAfterWrite); + }).thenApply(p -> { + self.tell(new ProgressUpdate(p), ActorRef.noSender()); + return p; + }); } /** - * Checks whether a string value contains an encrypted portion — either as a direct - * {@code encrypted_} prefix (non-URI fields) or embedded in the password part of a URI. + * Internal message used to pipe progress updates back to the actor thread. 
*/ - private static boolean containsEncryptedValue(final String value) { - if (value.startsWith(JsonFieldsEncryptor.ENCRYPTED_PREFIX)) { - return true; - } - try { - final URI uri = new URI(value); - if (uri.getScheme() != null && uri.getRawUserInfo() != null) { - final String[] userPass = uri.getRawUserInfo().split(":", 2); - return userPass.length == 2 && - userPass[1].startsWith(JsonFieldsEncryptor.ENCRYPTED_PREFIX); - } - } catch (final Exception ignored) { - // Not a valid URI — fall through - } - return false; - } + private record ProgressUpdate(MigrationProgress progress) {} /** - * Builds a MongoDB filter that matches only documents containing at least one of the - * encryptable fields. This avoids fetching documents (e.g. empty events) that have - * no fields to encrypt/decrypt. - * - * @param documentPrefix the BSON path prefix to the document (e.g. "s2" for snapshots, - * "events.p" for journal payloads) - * @param entityTypePrefix the entity type prefix applied to pointers (e.g. "connection" for journal) - * @param pointers the configured JSON pointers to encrypt - * @return a Bson filter requiring at least one encryptable field to exist + * Internal message used to pipe migration completion back to the actor thread. */ - private static Bson buildEncryptableFieldsFilter(final String documentPrefix, - final String entityTypePrefix, final List pointers) { - final String prefix = entityTypePrefix.isEmpty() - ? documentPrefix - : documentPrefix + "." 
+ entityTypePrefix; - final List existsFilters = pointers.stream() - .map(pointer -> pointer.replace("/", ".")) - .map(dotPath -> Filters.exists(prefix + dotPath)) - .collect(Collectors.toList()); - return Filters.or(existsFilters); - } - - private CompletionStage saveProgress(final MigrationProgress progress) { - final Document progressDoc = new Document() - .append(ID_FIELD, PROGRESS_ID) - .append("phase", progress.phase) - .append("lastProcessedSnapshotId", progress.lastProcessedSnapshotId) - .append("lastProcessedSnapshotPid", progress.lastProcessedSnapshotPid) - .append("lastProcessedJournalId", progress.lastProcessedJournalId) - .append("lastProcessedJournalPid", progress.lastProcessedJournalPid) - .append("snapshotsProcessed", progress.snapshotsProcessed) - .append("snapshotsSkipped", progress.snapshotsSkipped) - .append("snapshotsFailed", progress.snapshotsFailed) - .append("journalProcessed", progress.journalProcessed) - .append("journalSkipped", progress.journalSkipped) - .append("journalFailed", progress.journalFailed) - .append("startedAt", progress.startedAt) - .append("updatedAt", Instant.now().toString()); - - return Source.fromPublisher( - progressCollection.replaceOne( - Filters.eq(ID_FIELD, PROGRESS_ID), - progressDoc, - new ReplaceOptions().upsert(true))) - .runWith(Sink.ignore(), materializer) - .thenApply(done -> null); - } - - private CompletionStage> loadProgress() { - return Source.fromPublisher( - progressCollection.find(Filters.eq(ID_FIELD, PROGRESS_ID)).first()) - .runWith(Sink.headOption(), materializer) - .thenApply(optDoc -> optDoc.map(doc -> { - final MigrationProgress progress = new MigrationProgress(); - progress.phase = doc.getString("phase"); - progress.lastProcessedSnapshotId = doc.getString("lastProcessedSnapshotId"); - progress.lastProcessedSnapshotPid = doc.getString("lastProcessedSnapshotPid"); - progress.lastProcessedJournalId = doc.getString("lastProcessedJournalId"); - progress.lastProcessedJournalPid = 
doc.getString("lastProcessedJournalPid"); - progress.snapshotsProcessed = doc.getLong("snapshotsProcessed") != null - ? doc.getLong("snapshotsProcessed") : 0L; - progress.snapshotsSkipped = doc.getLong("snapshotsSkipped") != null - ? doc.getLong("snapshotsSkipped") : 0L; - progress.snapshotsFailed = doc.getLong("snapshotsFailed") != null - ? doc.getLong("snapshotsFailed") : 0L; - progress.journalProcessed = doc.getLong("journalProcessed") != null - ? doc.getLong("journalProcessed") : 0L; - progress.journalSkipped = doc.getLong("journalSkipped") != null - ? doc.getLong("journalSkipped") : 0L; - progress.journalFailed = doc.getLong("journalFailed") != null - ? doc.getLong("journalFailed") : 0L; - progress.startedAt = doc.getString("startedAt"); - return progress; - })); - } - - private CompletionStage deleteProgress() { - return Source.fromPublisher( - progressCollection.deleteOne(Filters.eq(ID_FIELD, PROGRESS_ID))) - .runWith(Sink.ignore(), materializer) - .thenApply(done -> null); - } - - /** - * Mutable progress tracker for the migration process. 
- */ - static final class MigrationProgress { - String phase = PHASE_SNAPSHOTS; - String lastProcessedSnapshotId; - String lastProcessedSnapshotPid; - String lastProcessedJournalId; - String lastProcessedJournalPid; - long snapshotsProcessed; - long snapshotsSkipped; - long snapshotsFailed; - long journalProcessed; - long journalSkipped; - long journalFailed; - String startedAt = Instant.now().toString(); - - MigrationProgress withPhase(final String newPhase) { - this.phase = newPhase; - return this; - } - - MigrationProgress withLastSnapshotId(final String id) { - this.lastProcessedSnapshotId = id; - return this; - } - - MigrationProgress withLastSnapshotPid(final String pid) { - this.lastProcessedSnapshotPid = pid; - return this; - } - - MigrationProgress withLastJournalId(final String id) { - this.lastProcessedJournalId = id; - return this; - } - - MigrationProgress withLastJournalPid(final String pid) { - this.lastProcessedJournalPid = pid; - return this; - } - - MigrationProgress incrementSnapshotsProcessed() { - this.snapshotsProcessed++; - return this; - } - - MigrationProgress incrementSnapshotsSkipped() { - this.snapshotsSkipped++; - return this; - } - - MigrationProgress incrementSnapshotsFailed() { - this.snapshotsFailed++; - return this; - } - - MigrationProgress incrementJournalProcessed() { - this.journalProcessed++; - return this; - } - - MigrationProgress incrementJournalSkipped() { - this.journalSkipped++; - return this; - } - - MigrationProgress incrementJournalFailed() { - this.journalFailed++; - return this; - } - - @Override - public String toString() { - return "MigrationProgress[" + - "phase=" + phase + - ", snapshots(processed=" + snapshotsProcessed + - ", skipped=" + snapshotsSkipped + - ", failed=" + snapshotsFailed + ")" + - ", journal(processed=" + journalProcessed + - ", skipped=" + journalSkipped + - ", failed=" + journalFailed + ")" + - "]"; - } - } + private record MigrationCompleted(@Nullable MigrationProgress progress, @Nullable 
Throwable error, boolean dryRun) {} } diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java index 501971f764e..d896541edb9 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/JsonFieldsEncryptor.java @@ -157,7 +157,7 @@ private static JsonObject createPatch(final JsonPointer pointer, final String ol private static String decryptValue(final String value, final String symmetricKey) { if (value.startsWith(ENCRYPTED_PREFIX)) { - final String striped = value.replace(ENCRYPTED_PREFIX, ""); + final String striped = value.substring(ENCRYPTED_PREFIX.length()); try { return EncryptorAesGcm.decryptWithPrefixIV(striped, symmetricKey); } catch (final Exception e) { @@ -177,7 +177,7 @@ private static String decryptValueWithFallback(final String value, final String return value; } - final String stripped = value.replace(ENCRYPTED_PREFIX, ""); + final String stripped = value.substring(ENCRYPTED_PREFIX.length()); // Try current key first try { @@ -189,6 +189,7 @@ private static String decryptValueWithFallback(final String value, final String try { return EncryptorAesGcm.decryptWithPrefixIV(stripped, oldSymmetricKey.get()); } catch (final Exception oldKeyException) { + oldKeyException.addSuppressed(currentKeyException); throw ConnectionConfigurationInvalidException.newBuilder( "Decryption of connection field failed with both current and old keys. 
" + "Verify that the configured encryption keys match the keys used to encrypt the data.") diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java index 51e8763d9b4..76bdf4efa0f 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryption.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java index 7cd5106d6bf..1962d50bbcf 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbort.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. 
diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java index 3398ea5ced7..276004a865e 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionAbortResponse.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java index f9c0845e569..d0fbcf46d74 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionResponse.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. 
diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java index 8b64c45df09..dbadaa2556b 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatus.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java index a1eb501f52e..3b9c8c324f1 100644 --- a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrateConnectionEncryptionStatusResponse.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. 
@@ -12,6 +12,7 @@ */ package org.eclipse.ditto.connectivity.service.messaging.persistence; +import java.time.Instant; import java.util.Collections; import java.util.Objects; import java.util.function.Predicate; @@ -119,66 +120,48 @@ private MigrateConnectionEncryptionStatusResponse(final String phase, * Creates a new {@code MigrateConnectionEncryptionStatusResponse}. * * @param phase the current migration phase. - * @param snapshotsProcessed number of snapshots processed. - * @param snapshotsSkipped number of snapshots skipped. - * @param snapshotsFailed number of snapshots that failed. - * @param journalProcessed number of journal documents processed. - * @param journalSkipped number of journal documents skipped. - * @param journalFailed number of journal documents that failed. - * @param lastProcessedSnapshotId last processed snapshot document ID, may be {@code null}. - * @param lastProcessedSnapshotPid last processed snapshot persistence ID (connection ID), may be {@code null}. - * @param lastProcessedJournalId last processed journal document ID, may be {@code null}. - * @param lastProcessedJournalPid last processed journal persistence ID (connection ID), may be {@code null}. - * @param startedAt when migration started, may be {@code null}. - * @param updatedAt when migration was last updated, may be {@code null}. + * @param migrationProgress the migration progress containing counters, last-processed IDs and timing. * @param dryRun whether the migration was/is a dry-run. * @param migrationActive whether migration is currently active. * @param dittoHeaders the headers. * @return the response. 
*/ public static MigrateConnectionEncryptionStatusResponse of(final String phase, - final long snapshotsProcessed, final long snapshotsSkipped, final long snapshotsFailed, - final long journalProcessed, final long journalSkipped, final long journalFailed, - @Nullable final String lastProcessedSnapshotId, @Nullable final String lastProcessedSnapshotPid, - @Nullable final String lastProcessedJournalId, @Nullable final String lastProcessedJournalPid, - @Nullable final String startedAt, @Nullable final String updatedAt, + final MigrationProgress migrationProgress, final boolean dryRun, final boolean migrationActive, final DittoHeaders dittoHeaders) { final JsonObject snapshots = JsonFactory.newObjectBuilder() - .set("processed", snapshotsProcessed) - .set("skipped", snapshotsSkipped) - .set("failed", snapshotsFailed) + .set("processed", migrationProgress.snapshotsProcessed()) + .set("skipped", migrationProgress.snapshotsSkipped()) + .set("failed", migrationProgress.snapshotsFailed()) .build(); final JsonObject journal = JsonFactory.newObjectBuilder() - .set("processed", journalProcessed) - .set("skipped", journalSkipped) - .set("failed", journalFailed) + .set("processed", migrationProgress.journalProcessed()) + .set("skipped", migrationProgress.journalSkipped()) + .set("failed", migrationProgress.journalFailed()) .build(); final JsonObjectBuilder progressBuilder = JsonFactory.newObjectBuilder(); - if (lastProcessedSnapshotId != null) { - progressBuilder.set("lastProcessedSnapshotId", lastProcessedSnapshotId); + if (migrationProgress.lastProcessedSnapshotId() != null) { + progressBuilder.set("lastProcessedSnapshotId", migrationProgress.lastProcessedSnapshotId()); } - if (lastProcessedSnapshotPid != null) { - progressBuilder.set("lastProcessedSnapshotPid", lastProcessedSnapshotPid); + if (migrationProgress.lastProcessedSnapshotPid() != null) { + progressBuilder.set("lastProcessedSnapshotPid", migrationProgress.lastProcessedSnapshotPid()); } - if (lastProcessedJournalId 
!= null) { - progressBuilder.set("lastProcessedJournalId", lastProcessedJournalId); + if (migrationProgress.lastProcessedJournalId() != null) { + progressBuilder.set("lastProcessedJournalId", migrationProgress.lastProcessedJournalId()); } - if (lastProcessedJournalPid != null) { - progressBuilder.set("lastProcessedJournalPid", lastProcessedJournalPid); + if (migrationProgress.lastProcessedJournalPid() != null) { + progressBuilder.set("lastProcessedJournalPid", migrationProgress.lastProcessedJournalPid()); } final JsonObject progress = progressBuilder.build(); final JsonObjectBuilder timingBuilder = JsonFactory.newObjectBuilder(); - if (startedAt != null) { - timingBuilder.set("startedAt", startedAt); - } - if (updatedAt != null) { - timingBuilder.set("updatedAt", updatedAt); - } + timingBuilder.set("startedAt", migrationProgress.startedAt()); + final String updatedAt = Instant.now().toString(); + timingBuilder.set("updatedAt", updatedAt); final JsonObject timing = timingBuilder.build(); return new MigrateConnectionEncryptionStatusResponse(phase, snapshots, journal, @@ -281,7 +264,8 @@ public boolean equals(@Nullable final Object o) { @Override public int hashCode() { - return Objects.hash(super.hashCode(), phase, snapshots, journalEvents, progress, timing, dryRun, migrationActive); + return Objects.hash(super.hashCode(), phase, snapshots, journalEvents, progress, timing, dryRun, + migrationActive); } @Override diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrationProgress.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrationProgress.java new file mode 100644 index 00000000000..e1dd09c6b69 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/MigrationProgress.java @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) 
distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.time.Instant; + +import javax.annotation.Nullable; + +/** + * Immutable progress tracker for the encryption migration process. + *
+ * <p>
+ * Tracks the current migration phase, counters for processed/skipped/failed documents, + * last processed IDs for resume functionality, and timing information. + */ +public record MigrationProgress( + String phase, + @Nullable String lastProcessedSnapshotId, + @Nullable String lastProcessedSnapshotPid, + @Nullable String lastProcessedJournalId, + @Nullable String lastProcessedJournalPid, + long snapshotsProcessed, + long snapshotsSkipped, + long snapshotsFailed, + long journalProcessed, + long journalSkipped, + long journalFailed, + String startedAt +) { + + /** + * Creates a new MigrationProgress with default values starting in the snapshots phase. + */ + public MigrationProgress() { + this("snapshots", null, null, null, null, + 0L, 0L, 0L, 0L, 0L, 0L, + Instant.now().toString()); + } + + MigrationProgress withPhase(final String newPhase) { + return new MigrationProgress(newPhase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress withLastSnapshotId(final String id) { + return new MigrationProgress(phase, id, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress withLastSnapshotPid(final String pid) { + return new MigrationProgress(phase, lastProcessedSnapshotId, pid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress withLastJournalId(final String id) { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + id, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, 
journalSkipped, journalFailed, startedAt); + } + + MigrationProgress withLastJournalPid(final String pid) { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, pid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress incrementSnapshotsProcessed() { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed + 1, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress incrementSnapshotsSkipped() { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped + 1, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress incrementSnapshotsFailed() { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed + 1, + journalProcessed, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress incrementJournalProcessed() { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed + 1, journalSkipped, journalFailed, startedAt); + } + + MigrationProgress incrementJournalSkipped() { + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped + 1, journalFailed, startedAt); + } + + MigrationProgress incrementJournalFailed() { 
+ return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + journalProcessed, journalSkipped, journalFailed + 1, startedAt); + } + + /** + * Adjusts counters when a bulk write fails: moves the "processed" count back to "failed" + * for the documents that were counted as processed but whose write did not persist. + * + * @param failedWriteCount number of documents in the failed bulk write + * @param isSnapshot true for snapshot counters, false for journal counters + * @return adjusted progress + */ + MigrationProgress adjustForBulkWriteFailure(final int failedWriteCount, final boolean isSnapshot) { + if (isSnapshot) { + final long adjustedProcessed = Math.max(0, snapshotsProcessed - failedWriteCount); + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + adjustedProcessed, snapshotsSkipped, snapshotsFailed + failedWriteCount, + journalProcessed, journalSkipped, journalFailed, startedAt); + } else { + final long adjustedProcessed = Math.max(0, journalProcessed - failedWriteCount); + return new MigrationProgress(phase, lastProcessedSnapshotId, lastProcessedSnapshotPid, + lastProcessedJournalId, lastProcessedJournalPid, + snapshotsProcessed, snapshotsSkipped, snapshotsFailed, + adjustedProcessed, journalSkipped, journalFailed + failedWriteCount, startedAt); + } + } + + @Override + public String toString() { + return "MigrationProgress[" + + "phase=" + phase + + ", snapshots(processed=" + snapshotsProcessed + + ", skipped=" + snapshotsSkipped + + ", failed=" + snapshotsFailed + ")" + + ", journal(processed=" + journalProcessed + + ", skipped=" + journalSkipped + + ", failed=" + journalFailed + ")" + + "]"; + } +} diff --git 
a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessingResult.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessingResult.java new file mode 100644 index 00000000000..4e25f918e62 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessingResult.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence.migration; + +import javax.annotation.Nullable; + +import org.bson.Document; + +import com.mongodb.client.model.WriteModel; + +/** + * Result of processing a MongoDB document during encryption migration. + *
+ * <p>
+ * Encapsulates the outcome (processed, skipped, or failed) and an optional write model + * for batch updates. + * + * @param outcome the processing outcome + * @param writeModel the MongoDB write model for batch updates, or {@code null} if dry-run or skipped + */ +public record DocumentProcessingResult( + DocumentOutcome outcome, + @Nullable WriteModel writeModel +) { + + /** + * Creates a result for a successfully processed document. + * + * @param writeModel the MongoDB write model, or {@code null} if dry-run + * @return a processed result + */ + public static DocumentProcessingResult processed(@Nullable final WriteModel writeModel) { + return new DocumentProcessingResult(DocumentOutcome.PROCESSED, writeModel); + } + + /** + * Creates a result for a skipped document. + *
+ * <p>
+ * Documents are skipped when they are already in the desired state (e.g., already encrypted + * with the new key, or already plaintext when disabling encryption). + * + * @return a skipped result + */ + public static DocumentProcessingResult skipped() { + return new DocumentProcessingResult(DocumentOutcome.SKIPPED, null); + } + + /** + * Creates a result for a failed document. + *
+ * <p>
+ * Documents fail when they cannot be processed due to errors (e.g., corrupted data, + * decryption failures, unexpected structure). + * + * @return a failed result + */ + public static DocumentProcessingResult failed() { + return new DocumentProcessingResult(DocumentOutcome.FAILED, null); + } + + /** + * Possible outcomes for document processing. + */ + public enum DocumentOutcome { + /** + * Document was successfully processed and re-encrypted. + */ + PROCESSED, + + /** + * Document was skipped because it's already in the desired state. + */ + SKIPPED, + + /** + * Document processing failed due to an error. + */ + FAILED + } +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessor.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessor.java new file mode 100644 index 00000000000..af956a42dc7 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessor.java @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence.migration; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import org.bson.BsonDocument; +import org.bson.Document; +import org.eclipse.ditto.connectivity.model.ConnectionConfigurationInvalidException; +import org.eclipse.ditto.connectivity.service.messaging.persistence.JsonFieldsEncryptor; +import org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson; +import org.eclipse.ditto.json.JsonObject; +import org.eclipse.ditto.json.JsonPointer; +import org.eclipse.ditto.json.JsonValue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.ReplaceOneModel; +import com.mongodb.client.model.WriteModel; + +/** + * Processes MongoDB documents during encryption migration. + *
+ * <p>
+ * Provides static methods for transforming snapshot and journal documents, including + * re-encrypting fields with new keys, decrypting fields to plaintext, and initial encryption + * of plaintext data. + */ +public final class DocumentProcessor { + + private static final Logger LOGGER = LoggerFactory.getLogger(DocumentProcessor.class); + + // MongoDB field names for pekko-persistence-mongodb + private static final String SNAPSHOT_SERIALIZED_FIELD = "s2"; + private static final String JOURNAL_EVENTS_FIELD = "events"; + private static final String JOURNAL_PAYLOAD_FIELD = "p"; + private static final String ID_FIELD = "_id"; + + private DocumentProcessor() { + // Utility class - no instantiation + } + + /** + * Processes a snapshot document during migration. + *
+ * <p>
+ * Reads the serialized snapshot (BSON field "s2"), decrypts/re-encrypts according to + * the migration context, and creates a write model for batch updates. + * + * @param doc the MongoDB snapshot document + * @param context the migration context (keys, pointers, entity prefix) + * @param dryRun if true, skip writing changes to MongoDB + * @return the processing result + */ + public static DocumentProcessingResult processSnapshotDocument(final Document doc, + final MigrationContext context, final boolean dryRun) { + final String docId = doc.get(ID_FIELD).toString(); + final String pid = doc.getString("pid"); + try { + final Document s2 = doc.get(SNAPSHOT_SERIALIZED_FIELD, Document.class); + if (s2 == null) { + return DocumentProcessingResult.skipped(); + } else { + final BsonDocument bsonDoc = s2.toBsonDocument(Document.class, + com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); + final JsonObject jsonObject = DittoBsonJson.getInstance().serialize(bsonDoc); + + final Optional reEncrypted = reEncryptFields(jsonObject, context); + + if (reEncrypted.isEmpty()) { + return DocumentProcessingResult.skipped(); + } else { + if (!dryRun) { + final BsonDocument newBson = DittoBsonJson.getInstance().parse(reEncrypted.get()); + doc.put(SNAPSHOT_SERIALIZED_FIELD, Document.parse(newBson.toJson())); + final WriteModel writeModel = new ReplaceOneModel<>( + Filters.eq(ID_FIELD, doc.get(ID_FIELD)), + doc); + return DocumentProcessingResult.processed(writeModel); + } + return DocumentProcessingResult.processed(null); + } + } + } catch (final Exception e) { + LOGGER.warn("Failed to process snapshot {} (pid={}): {}", docId, pid, e.getMessage()); + return DocumentProcessingResult.failed(); + } + } + + /** + * Processes a journal document during migration. + *
+ * <p>
+ * Iterates through the "events" array, decrypts/re-encrypts each event payload according to + * the migration context, and creates a write model for batch updates. + * + * @param doc the MongoDB journal document + * @param context the migration context (keys, pointers, entity prefix) + * @param dryRun if true, skip writing changes to MongoDB + * @return the processing result + */ + public static DocumentProcessingResult processJournalDocument(final Document doc, + final MigrationContext context, final boolean dryRun) { + final Object docId = doc.get(ID_FIELD); + final String docIdStr = docId.toString(); + final String pid = doc.getString("pid"); + try { + final List events = doc.getList(JOURNAL_EVENTS_FIELD, Document.class); + if (events == null || events.isEmpty()) { + return DocumentProcessingResult.skipped(); + } else { + boolean anyChanged = false; + final List updatedEvents = new ArrayList<>(events.size()); + + for (final Document event : events) { + final Document payload = event.get(JOURNAL_PAYLOAD_FIELD, Document.class); + if (payload == null) { + updatedEvents.add(event); + } else { + final BsonDocument bsonPayload = payload.toBsonDocument(Document.class, + com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); + final JsonObject jsonPayload = DittoBsonJson.getInstance().serialize(bsonPayload); + + final Optional reEncrypted = reEncryptFields(jsonPayload, context); + + if (reEncrypted.isPresent()) { + if (!dryRun) { + final BsonDocument newBson = DittoBsonJson.getInstance().parse(reEncrypted.get()); + event.put(JOURNAL_PAYLOAD_FIELD, Document.parse(newBson.toJson())); + } + anyChanged = true; + } + updatedEvents.add(event); + } + } + + if (anyChanged) { + if (!dryRun) { + doc.put(JOURNAL_EVENTS_FIELD, updatedEvents); + final WriteModel writeModel = new ReplaceOneModel<>( + Filters.eq(ID_FIELD, docId), + doc); + return DocumentProcessingResult.processed(writeModel); + } + return DocumentProcessingResult.processed(null); + } else { + return 
DocumentProcessingResult.skipped(); + } + } + } catch (final Exception e) { + LOGGER.warn("Failed to process journal document {} (pid={}): {}", docIdStr, pid, e.getMessage()); + return DocumentProcessingResult.failed(); + } + } + + /** + * Re-encrypts fields in a JSON object based on the migration mode. + *
+ * <p>
+ * Supports three modes: + *

+ * <ul>
+ *   <li>Initial encryption ({@code oldKey == null}): encrypt plaintext with newKey</li>
+ *   <li>Key rotation (both keys set): decrypt with oldKey, encrypt with newKey</li>
+ *   <li>Disable encryption ({@code newKey == null}): decrypt with oldKey, write plaintext</li>
+ * </ul>
+ * + * @param jsonObject the JSON object to process + * @param context the migration context (keys, pointers, entity prefix) + * @return the transformed JSON object, or {@code Optional.empty()} if already in the desired state (skip) + */ + public static Optional reEncryptFields(final JsonObject jsonObject, + final MigrationContext context) { + + final String oldKey = context.oldKey(); + final String newKey = context.newKey(); + final List pointers = context.pointers(); + final String entityTypePrefix = context.entityTypePrefix(); + + if (oldKey == null && newKey != null) { + // Initial encryption: data is plaintext, encrypt with new key + // Check if any field already has the encrypted_ prefix — if so, skip + final boolean alreadyEncrypted = pointers.stream() + .map(p -> entityTypePrefix + p) + .map(JsonPointer::of) + .flatMap(pointer -> jsonObject.getValue(pointer).stream()) + .filter(JsonValue::isString) + .anyMatch(v -> containsEncryptedValue(v.asString())); + if (alreadyEncrypted) { + return Optional.empty(); + } + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(jsonObject, entityTypePrefix, pointers, newKey); + // Skip if encrypt produced no changes (e.g. no matching pointers in this entity) + return encrypted.equals(jsonObject) ? Optional.empty() : Optional.of(encrypted); + } + + // Key rotation or disable workflow — oldKey must be set + // Try decrypting with the old key + try { + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(jsonObject, entityTypePrefix, + pointers, oldKey); + + if (newKey == null) { + // Disable workflow: return decrypted plaintext, but skip if nothing changed + // (decrypt silently passes through plaintext values, so unchanged means already plain) + return decrypted.equals(jsonObject) ? 
Optional.empty() : Optional.of(decrypted); + } else { + // Key rotation: re-encrypt with new key + final JsonObject reEncrypted = JsonFieldsEncryptor.encrypt(decrypted, entityTypePrefix, pointers, newKey); + // Skip if the result is identical (e.g. no matching pointers in this entity) + return reEncrypted.equals(jsonObject) ? Optional.empty() : Optional.of(reEncrypted); + } + } catch (final ConnectionConfigurationInvalidException e) { + // Old key failed — try new key to see if already migrated + // (Only applicable for key rotation, not disable workflow) + if (newKey == null) { + // Disable workflow: if old key fails, data is already plaintext - skip + return Optional.empty(); + } + + try { + JsonFieldsEncryptor.decrypt(jsonObject, entityTypePrefix, pointers, newKey); + // Already encrypted with new key — skip + return Optional.empty(); + } catch (final ConnectionConfigurationInvalidException e2) { + // Both keys failed on encrypted_ prefixed data — this is an error, not plaintext. + // Re-encrypting would cause double encryption. + e2.addSuppressed(e); + throw e2; + } + } + } + + /** + * Checks whether a string value contains an encrypted portion — either as a direct + * {@code encrypted_} prefix (non-URI fields) or embedded in the password part of a URI. 
+ * + * @param value the string value to check + * @return true if the value contains encrypted data + */ + private static boolean containsEncryptedValue(final String value) { + final String encryptedPrefix = "encrypted_"; + if (value.startsWith(encryptedPrefix)) { + return true; + } + try { + final URI uri = new URI(value); + if (uri.getScheme() != null && uri.getRawUserInfo() != null) { + final String[] userPass = uri.getRawUserInfo().split(":", 2); + return userPass.length == 2 && + userPass[1].startsWith(encryptedPrefix); + } + } catch (final Exception ignored) { + // Not a valid URI — fall through + } + return false; + } +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationContext.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationContext.java new file mode 100644 index 00000000000..712c2756265 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationContext.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence.migration; + +import java.util.List; + +import javax.annotation.Nullable; + +/** + * Immutable configuration context for encryption migration operations. + *
+ * <p>
+ * Encapsulates the encryption keys, JSON pointers to encrypt/decrypt, and the entity type prefix + * required for processing connection snapshots and journal events. + * + * @param oldKey the old encryption key, or {@code null} for initial encryption (plaintext data) + * @param newKey the new encryption key, or {@code null} to disable encryption (write plaintext) + * @param pointers the JSON pointers to encrypt/decrypt (e.g., "/uri", "/credentials/password") + * @param entityTypePrefix the entity type prefix applied to pointers (e.g., "connection" for journal events, "" for snapshots) + */ +public record MigrationContext( + @Nullable String oldKey, + @Nullable String newKey, + List pointers, + String entityTypePrefix +) { + + /** + * Creates a migration context for snapshot processing. + *
+ * <p>
+ * Snapshots use an empty entity type prefix, so pointers are applied directly + * (e.g., "/uri" targets the "/uri" field in the snapshot JSON). + * + * @param oldKey the old encryption key, or {@code null} for initial encryption + * @param newKey the new encryption key, or {@code null} to disable encryption + * @param pointers the JSON pointers to encrypt/decrypt + * @return a migration context for snapshots + */ + public static MigrationContext forSnapshots(@Nullable final String oldKey, @Nullable final String newKey, + final List pointers) { + return new MigrationContext(oldKey, newKey, pointers, ""); + } + + /** + * Creates a migration context for journal event processing. + *
+ * <p>
+ * Journal events use "connection" as the entity type prefix, so pointers are prefixed + * (e.g., "/uri" targets the "/connection/uri" field in the journal event JSON). + * + * @param oldKey the old encryption key, or {@code null} for initial encryption + * @param newKey the new encryption key, or {@code null} to disable encryption + * @param pointers the JSON pointers to encrypt/decrypt + * @return a migration context for journal events + */ + public static MigrationContext forJournal(@Nullable final String oldKey, @Nullable final String newKey, + final List pointers) { + return new MigrationContext(oldKey, newKey, pointers, "connection"); + } +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationProgressTracker.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationProgressTracker.java new file mode 100644 index 00000000000..39172d39115 --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationProgressTracker.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence.migration; + +import java.time.Instant; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; + +import org.apache.pekko.stream.Materializer; +import org.apache.pekko.stream.javadsl.Sink; +import org.apache.pekko.stream.javadsl.Source; +import org.bson.Document; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrationProgress; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.reactivestreams.client.MongoCollection; + +/** + * Handles persistence of encryption migration progress to MongoDB. + *
+ * <p>
+ * Provides methods to save, load, and delete migration progress, enabling resume + * functionality after service restarts or migration aborts. + */ +public final class MigrationProgressTracker { + + private static final Logger LOGGER = LoggerFactory.getLogger(MigrationProgressTracker.class); + + private static final String PROGRESS_ID = "current"; + private static final String ID_FIELD = "_id"; + + private final MongoCollection progressCollection; + private final Materializer materializer; + + /** + * Creates a new MigrationProgressTracker. + * + * @param progressCollection the MongoDB collection for storing progress + * @param materializer the Pekko Streams materializer + */ + public MigrationProgressTracker(final MongoCollection progressCollection, + final Materializer materializer) { + this.progressCollection = progressCollection; + this.materializer = materializer; + } + + /** + * Saves migration progress to MongoDB. + *
+ * <p>
+ * Uses upsert to create or update the progress document. The progress document + * includes all counters, last processed IDs, and timestamp information. + * + * @param progress the migration progress to save + * @return a completion stage that completes when the save operation finishes + */ + public CompletionStage saveProgress(final MigrationProgress progress) { + final Document progressDoc = new Document() + .append(ID_FIELD, PROGRESS_ID) + .append("phase", progress.phase()) + .append("lastProcessedSnapshotId", progress.lastProcessedSnapshotId()) + .append("lastProcessedSnapshotPid", progress.lastProcessedSnapshotPid()) + .append("lastProcessedJournalId", progress.lastProcessedJournalId()) + .append("lastProcessedJournalPid", progress.lastProcessedJournalPid()) + .append("snapshotsProcessed", progress.snapshotsProcessed()) + .append("snapshotsSkipped", progress.snapshotsSkipped()) + .append("snapshotsFailed", progress.snapshotsFailed()) + .append("journalProcessed", progress.journalProcessed()) + .append("journalSkipped", progress.journalSkipped()) + .append("journalFailed", progress.journalFailed()) + .append("startedAt", progress.startedAt()) + .append("updatedAt", Instant.now().toString()); + + return Source.fromPublisher( + progressCollection.replaceOne( + Filters.eq(ID_FIELD, PROGRESS_ID), + progressDoc, + new ReplaceOptions().upsert(true))) + .runWith(Sink.ignore(), materializer) + .thenApply(done -> null); + } + + /** + * Saves migration progress with retry logic for handling transient failures. + *

+ * Used primarily when saving abort progress to ensure it persists even if + * there are temporary MongoDB connection issues. + * + * @param progress the migration progress to save + * @param maxRetries maximum number of retry attempts + * @return a completion stage that completes when the save operation succeeds or all retries are exhausted + */ + public CompletionStage saveProgressWithRetry(final MigrationProgress progress, final int maxRetries) { + return saveProgress(progress).exceptionallyCompose(err -> { + if (maxRetries > 0) { + LOGGER.warn("Retrying progress save (remaining retries={}): {}", maxRetries, err.getMessage()); + return saveProgressWithRetry(progress, maxRetries - 1); + } + return CompletableFuture.failedFuture(err); + }); + } + + /** + * Loads migration progress from MongoDB. + *

+ * Used when resuming a migration after a service restart or to check the status + * of a previous migration. + * + * @return a completion stage with an optional containing the progress if it exists + */ + public CompletionStage> loadProgress() { + return Source.fromPublisher( + progressCollection.find(Filters.eq(ID_FIELD, PROGRESS_ID)).first()) + .runWith(Sink.headOption(), materializer) + .thenApply(optDoc -> optDoc.map(doc -> new MigrationProgress( + doc.getString("phase"), + doc.getString("lastProcessedSnapshotId"), + doc.getString("lastProcessedSnapshotPid"), + doc.getString("lastProcessedJournalId"), + doc.getString("lastProcessedJournalPid"), + doc.getLong("snapshotsProcessed") != null + ? doc.getLong("snapshotsProcessed") : 0L, + doc.getLong("snapshotsSkipped") != null + ? doc.getLong("snapshotsSkipped") : 0L, + doc.getLong("snapshotsFailed") != null + ? doc.getLong("snapshotsFailed") : 0L, + doc.getLong("journalProcessed") != null + ? doc.getLong("journalProcessed") : 0L, + doc.getLong("journalSkipped") != null + ? doc.getLong("journalSkipped") : 0L, + doc.getLong("journalFailed") != null + ? doc.getLong("journalFailed") : 0L, + doc.getString("startedAt") + ))); + } + + /** + * Deletes migration progress from MongoDB. + *

+ * Used when starting a fresh migration (not resuming) to clear any previously + * saved progress. + * + * @return a completion stage that completes when the delete operation finishes + */ + public CompletionStage deleteProgress() { + return Source.fromPublisher( + progressCollection.deleteOne(Filters.eq(ID_FIELD, PROGRESS_ID))) + .runWith(Sink.ignore(), materializer) + .thenApply(done -> null); + } +} diff --git a/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationStreamFactory.java b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationStreamFactory.java new file mode 100644 index 00000000000..65798ce725a --- /dev/null +++ b/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/MigrationStreamFactory.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence.migration; + +import java.util.List; +import java.util.stream.Collectors; + +import javax.annotation.Nullable; + +import org.apache.pekko.NotUsed; +import org.apache.pekko.stream.javadsl.Source; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import org.eclipse.ditto.connectivity.service.messaging.persistence.MigrationProgress; + +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.Sorts; +import com.mongodb.reactivestreams.client.MongoCollection; + +/** + * Factory for building Pekko Streams sources for encryption migration. + *

+ * Provides static methods to construct MongoDB query streams with appropriate filters, + * sorting, throttling, and resume capabilities for snapshot and journal migrations. + */ +public final class MigrationStreamFactory { + + // MongoDB field names for pekko-persistence-mongodb + private static final String SNAPSHOT_SERIALIZED_FIELD = "s2"; + private static final String JOURNAL_EVENTS_FIELD = "events"; + private static final String JOURNAL_PAYLOAD_FIELD = "p"; + private static final String ID_FIELD = "_id"; + + // Entity type prefix used for journal event encryption + private static final String JOURNAL_ENTITY_TYPE_PREFIX = "connection"; + // Snapshot encryption uses empty prefix + private static final String SNAPSHOT_ENTITY_TYPE_PREFIX = ""; + + private MigrationStreamFactory() { + // Utility class - no instantiation + } + + /** + * Builds a Pekko Streams source for snapshot migration. + *

+ * The source queries the snapshot collection with filters for: + *

+ * <ul>
+ *   <li>Resume support: Skip documents already processed (based on lastProcessedSnapshotId)</li>
+ *   <li>Encryptable fields: Only fetch documents containing fields that can be encrypted</li>
+ * </ul>
+ * Documents are sorted by _id (ascending) and fetched with the specified batch size. + * + * @param snapshotCollection the MongoDB snapshot collection + * @param progress the current migration progress (for resume support) + * @param pointers the JSON pointers to encrypt/decrypt + * @param batchSize the MongoDB fetch batch size + * @param maxDocumentsPerMinute throttling limit (0 = disabled) + * @return a Pekko Streams source of snapshot documents + */ + public static Source buildSnapshotStream( + final MongoCollection snapshotCollection, + final MigrationProgress progress, + final List pointers, + final int batchSize, + final int maxDocumentsPerMinute) { + + final Bson resumeFilter = buildResumeFilter(progress.lastProcessedSnapshotId()); + final Bson encryptableFieldsFilter = buildEncryptableFieldsFilter( + SNAPSHOT_SERIALIZED_FIELD, SNAPSHOT_ENTITY_TYPE_PREFIX, pointers); + final Bson filter = Filters.and(resumeFilter, encryptableFieldsFilter); + + final Source source = Source.fromPublisher( + snapshotCollection.find(filter) + .sort(Sorts.ascending(ID_FIELD)) + .batchSize(batchSize)); + + return applyThrottling(source, maxDocumentsPerMinute); + } + + /** + * Builds a Pekko Streams source for journal migration. + *

+ * The source queries the journal collection with filters for: + *

+ * <ul>
+ *   <li>Resume support: Skip documents already processed (based on lastProcessedJournalId)</li>
+ *   <li>Encryptable fields: Only fetch documents with event payloads containing encryptable fields</li>
+ * </ul>
+ * Documents are sorted by _id (ascending) and fetched with the specified batch size. + * + * @param journalCollection the MongoDB journal collection + * @param progress the current migration progress (for resume support) + * @param pointers the JSON pointers to encrypt/decrypt + * @param batchSize the MongoDB fetch batch size + * @param maxDocumentsPerMinute throttling limit (0 = disabled) + * @return a Pekko Streams source of journal documents + */ + public static Source buildJournalStream( + final MongoCollection journalCollection, + final MigrationProgress progress, + final List pointers, + final int batchSize, + final int maxDocumentsPerMinute) { + + final Bson resumeFilter = progress.lastProcessedJournalId() != null + ? Filters.gt(ID_FIELD, new ObjectId(progress.lastProcessedJournalId())) + : Filters.empty(); + final Bson encryptableFieldsFilter = buildEncryptableFieldsFilter( + JOURNAL_EVENTS_FIELD + "." + JOURNAL_PAYLOAD_FIELD, + JOURNAL_ENTITY_TYPE_PREFIX, pointers); + final Bson filter = Filters.and(resumeFilter, encryptableFieldsFilter); + + final Source source = Source.fromPublisher( + journalCollection.find(filter) + .sort(Sorts.ascending(ID_FIELD)) + .batchSize(batchSize)); + + return applyThrottling(source, maxDocumentsPerMinute); + } + + /** + * Builds a MongoDB resume filter to skip already-processed documents. + *

+ * Handles both ObjectId and string-based _id formats. If the lastProcessedId is not + * a valid ObjectId, falls back to string comparison. + * + * @param lastProcessedId the ID of the last processed document, or null to start from the beginning + * @return a Bson filter for resume support + */ + private static Bson buildResumeFilter(@Nullable final String lastProcessedId) { + if (lastProcessedId == null) { + return Filters.empty(); + } + + try { + return Filters.gt(ID_FIELD, new ObjectId(lastProcessedId)); + } catch (final IllegalArgumentException e) { + // Non-ObjectId _id format — use string comparison as fallback + return Filters.gt(ID_FIELD, lastProcessedId); + } + } + + /** + * Builds a MongoDB filter that matches only documents containing at least one of the + * encryptable fields. This avoids fetching documents (e.g. empty events) that have + * no fields to encrypt/decrypt. + * + * @param documentPrefix the BSON path prefix to the document (e.g. "s2" for snapshots, + * "events.p" for journal payloads) + * @param entityTypePrefix the entity type prefix applied to pointers (e.g. "connection" for journal) + * @param pointers the configured JSON pointers to encrypt + * @return a Bson filter requiring at least one encryptable field to exist + */ + static Bson buildEncryptableFieldsFilter(final String documentPrefix, + final String entityTypePrefix, final List pointers) { + final String prefix = entityTypePrefix.isEmpty() + ? documentPrefix + : documentPrefix + "." + entityTypePrefix; + final List existsFilters = pointers.stream() + .map(pointer -> pointer.replace("/", ".")) + .map(dotPath -> Filters.exists(prefix + dotPath)) + .collect(Collectors.toList()); + return Filters.or(existsFilters); + } + + /** + * Applies throttling to the source stream if throttling is enabled (maxDocsPerMinute > 0). + *

+ * Throttling is implemented using Pekko Streams throttle operator with a token-bucket + * algorithm, allowing bursts up to maxDocsPerMinute as long as the average rate stays + * within the limit. + * + * @param source the source stream + * @param maxDocsPerMinute maximum documents per minute, 0 means no throttling + * @return throttled source if enabled, original source otherwise + */ + static Source applyThrottling(final Source source, + final int maxDocsPerMinute) { + if (maxDocsPerMinute <= 0) { + return source; + } + + return source.throttle(maxDocsPerMinute, java.time.Duration.ofMinutes(1)); + } +} diff --git a/connectivity/service/src/main/resources/connectivity.conf b/connectivity/service/src/main/resources/connectivity.conf index 1ca4ce25089..463def61edf 100644 --- a/connectivity/service/src/main/resources/connectivity.conf +++ b/connectivity/service/src/main/resources/connectivity.conf @@ -245,13 +245,22 @@ ditto { json-pointers = ${?CONNECTIVITY_CONNECTION_ENCRYPTION_POINTERS} migration { - # Batch size for the encryption migration process (documents per batch) - batch-size = 100 + # Batch size for encryption migration (documents per MongoDB bulk write) + # + # Performance vs Safety tradeoff: + # - Small batch (10-20): Safest, minimal memory impact, easier rollback on errors + # - Medium batch (50-100): Balanced performance and safety + # - Large batch (200+): Best performance, higher memory usage, larger failure impact + # + # Recommendation: Start with 10-20 for initial deployment, increase to 50-100 after + # successful test migrations. Monitor MongoDB memory and connection pool utilization. 
+ batch-size = 10 batch-size = ${?CONNECTIVITY_ENCRYPTION_MIGRATION_BATCH_SIZE} - # Maximum number of documents to migrate per minute (throttling) - # Set to 0 to disable throttling (not recommended for production) - # Default: 100 documents per minute (safe for most deployments) + # Maximum documents to migrate per minute (throttling) + # Set to 0 to disable + # This limit works in combination with batch-size to control overall throughput: + # - Throttling adds delays between batches to stay within the limit max-documents-per-minute = 200 max-documents-per-minute = ${?CONNECTIVITY_ENCRYPTION_MIGRATION_MAX_DOCS_PER_MINUTE} } diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java index bcdf16cb7d5..bdf37bf7fe4 100644 --- a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationActorTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Contributors to the Eclipse Foundation + * Copyright (c) 2026 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. 
@@ -13,11 +13,14 @@ package org.eclipse.ditto.connectivity.service.messaging.persistence; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.security.NoSuchAlgorithmException; import java.util.List; +import java.util.Optional; +import org.eclipse.ditto.base.model.headers.DittoHeaders; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.DocumentProcessor; +import org.eclipse.ditto.connectivity.service.messaging.persistence.migration.MigrationContext; import org.eclipse.ditto.connectivity.service.util.EncryptorAesGcm; import org.eclipse.ditto.json.JsonFactory; import org.eclipse.ditto.json.JsonObject; @@ -55,11 +58,11 @@ public void reEncryptSnapshotFieldsFromOldKeyToNewKey() { final JsonObject plain = createPlainSnapshotJson(); final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - encryptedWithOldKey, "", POINTERS, OLD_KEY, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithOldKey, context); - assertThat(result).isNotNull(); - final JsonObject decryptedWithNewKey = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + assertThat(result).isPresent(); + final JsonObject decryptedWithNewKey = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); assertThat(decryptedWithNewKey.getValue("/credentials/password")) .contains(plain.getValue("/credentials/password").get()); } @@ -70,11 +73,11 @@ public void reEncryptJournalFieldsFromOldKeyToNewKey() { final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt( plain, "connection", POINTERS, OLD_KEY); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - encryptedWithOldKey, "connection", POINTERS, OLD_KEY, NEW_KEY); + final 
MigrationContext context = MigrationContext.forJournal(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithOldKey, context); - assertThat(result).isNotNull(); - final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "connection", POINTERS, NEW_KEY); + assertThat(result).isPresent(); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "connection", POINTERS, NEW_KEY); assertThat(decrypted.getValue("/connection/credentials/password")) .contains(plain.getValue("/connection/credentials/password").get()); } @@ -84,41 +87,52 @@ public void skipAlreadyMigratedDocument() { final JsonObject plain = createPlainSnapshotJson(); final JsonObject encryptedWithNewKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - encryptedWithNewKey, "", POINTERS, OLD_KEY, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithNewKey, context); - assertThat(result).isNull(); + assertThat(result).isEmpty(); } @Test - public void whenBothKeysFailTreatsAsPlaintextAndEncrypts() { - // When neither old nor new key can decrypt, the data is treated as plaintext - // and encrypted with the new key. This handles the case where data was stored - // before encryption was enabled. + public void keyRotationEncryptsPlaintextFieldsWithNewKey() { + // During key rotation, plaintext fields (no encrypted_ prefix) are treated as plaintext: + // decrypt passes them through unchanged, then encrypt wraps them with the new key. 
final JsonObject plain = createPlainSnapshotJson(); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - plain, "", POINTERS, OLD_KEY, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); - // Should encrypt the plaintext data with new key - assertThat(result).isNotNull(); + assertThat(result).isPresent(); - // Verify it can be decrypted with the new key - final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); assertThat(decrypted).isEqualTo(plain); } + @Test + public void whenBothKeysFailOnEncryptedDataThrowsException() { + // Data encrypted with a wrong key (has encrypted_ prefix but neither old nor new key + // can decrypt it) should throw an error — not silently double-encrypt. + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject encryptedWithWrongKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, WRONG_KEY); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + + org.assertj.core.api.Assertions.assertThatThrownBy( + () -> DocumentProcessor.reEncryptFields(encryptedWithWrongKey, context)) + .isInstanceOf(org.eclipse.ditto.connectivity.model.ConnectionConfigurationInvalidException.class); + } + @Test public void initialEncryptionEncryptsPlaintext() { // Initial encryption: oldKey is null, newKey is set final JsonObject plain = createPlainSnapshotJson(); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - plain, "", POINTERS, null, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(null, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); - assertThat(result).isNotNull(); + assertThat(result).isPresent(); // Verify it can be decrypted 
with the new key - final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); assertThat(decrypted).isEqualTo(plain); } @@ -128,11 +142,11 @@ public void initialEncryptionSkipsAlreadyEncrypted() { final JsonObject plain = createPlainSnapshotJson(); final JsonObject alreadyEncrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - alreadyEncrypted, "", POINTERS, null, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(null, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(alreadyEncrypted, context); // Should skip - already encrypted - assertThat(result).isNull(); + assertThat(result).isEmpty(); } @Test @@ -146,31 +160,17 @@ public void uriPasswordReEncryptedCorrectly() { final String encryptedUri = encryptedWithOldKey.getValue("/uri").get().asString(); assertThat(encryptedUri).contains("encrypted_"); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - encryptedWithOldKey, "", POINTERS, OLD_KEY, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithOldKey, context); - assertThat(result).isNotNull(); - final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result, "", POINTERS, NEW_KEY); + assertThat(result).isPresent(); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); assertThat(decrypted.getValue("/uri").map(v -> v.asString())) .hasValue("amqps://user:secretpassword@broker.example.com:5671"); } - @Test - public void plainTextFieldsNotAffected() { - final JsonObject plain = createPlainSnapshotJson(); - - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - plain, "", POINTERS, OLD_KEY, NEW_KEY); - - 
assertThat(result).isNotNull(); - final String encryptedPwd = result.getValue("/credentials/password").get().asString(); - assertThat(encryptedPwd).startsWith("encrypted_"); - } - @Test public void initialEncryptionSkipsUriWithAlreadyEncryptedPassword() { - // Bug 1: URI fields like amqps://user:encrypted_XXX@host were not detected - // as already encrypted because startsWith("encrypted_") checks the full URI string final JsonObject plain = JsonFactory.newObjectBuilder() .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") .set("/credentials/password", "mypassword") @@ -183,23 +183,21 @@ public void initialEncryptionSkipsUriWithAlreadyEncryptedPassword() { assertThat(encUri).contains("encrypted_"); // Initial encryption (oldKey=null) should detect the encrypted URI and skip - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - encrypted, "", POINTERS, null, NEW_KEY); + final MigrationContext context = MigrationContext.forSnapshots(null, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encrypted, context); - assertThat(result).isNull(); + assertThat(result).isEmpty(); } @Test public void disableWorkflowSkipsAlreadyPlaintextEntity() { - // Bug 2: decrypt() silently passes through plaintext, so disable workflow - // was counting plaintext entities as "processed" instead of "skipped" final JsonObject plain = createPlainSnapshotJson(); - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - plain, "", POINTERS, OLD_KEY, null); + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, null, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); - // Should return null (skip) because decrypt returns unchanged plaintext - assertThat(result).isNull(); + // Should return empty (skip) because decrypt returns unchanged plaintext + assertThat(result).isEmpty(); } @Test @@ -211,13 +209,13 @@ public void 
disableWorkflowProcessesEntityWithEncryptedUriPassword() { final JsonObject encrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); // Disable workflow (newKey=null) should decrypt and return plaintext - final JsonObject result = EncryptionMigrationActor.reEncryptFields( - encrypted, "", POINTERS, OLD_KEY, null); + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, null, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encrypted, context); - assertThat(result).isNotNull(); - assertThat(result.getValue("/uri").get().asString()) + assertThat(result).isPresent(); + assertThat(result.get().getValue("/uri").get().asString()) .isEqualTo("amqps://user:secretpassword@broker.example.com:5671"); - assertThat(result.getValue("/credentials/password").get().asString()) + assertThat(result.get().getValue("/credentials/password").get().asString()) .isEqualTo("mypassword"); } @@ -230,23 +228,21 @@ public void eventWithNoEncryptableFieldsIsSkipped() { .build(); // Initial encryption - assertThat(EncryptionMigrationActor.reEncryptFields( - emptyEvent, "connection", POINTERS, null, NEW_KEY)).isNull(); + final MigrationContext initialContext = MigrationContext.forJournal(null, NEW_KEY, POINTERS); + assertThat(DocumentProcessor.reEncryptFields(emptyEvent, initialContext)).isEmpty(); // Key rotation - assertThat(EncryptionMigrationActor.reEncryptFields( - emptyEvent, "connection", POINTERS, OLD_KEY, NEW_KEY)).isNull(); + final MigrationContext rotationContext = MigrationContext.forJournal(OLD_KEY, NEW_KEY, POINTERS); + assertThat(DocumentProcessor.reEncryptFields(emptyEvent, rotationContext)).isEmpty(); // Disable workflow - assertThat(EncryptionMigrationActor.reEncryptFields( - emptyEvent, "connection", POINTERS, OLD_KEY, null)).isNull(); + final MigrationContext disableContext = MigrationContext.forJournal(OLD_KEY, null, POINTERS); + assertThat(DocumentProcessor.reEncryptFields(emptyEvent, disableContext)).isEmpty(); } - // --- 
MigrateConnectionEncryption command tests --- - @Test public void commandSerializationRoundTrip() { - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var command = MigrateConnectionEncryption.of(true, false, headers); final JsonObject json = command.toJson(); @@ -262,7 +258,7 @@ public void commandDefaultValues() { final JsonObject minimalJson = JsonFactory.newObjectBuilder() .set("type", MigrateConnectionEncryption.TYPE) .build(); - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var command = MigrateConnectionEncryption.fromJson(minimalJson, headers); @@ -270,11 +266,9 @@ public void commandDefaultValues() { assertThat(command.isResume()).isFalse(); } - // --- MigrateConnectionEncryptionAbort command tests --- - @Test public void abortCommandSerializationRoundTrip() { - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var command = MigrateConnectionEncryptionAbort.of(headers); final JsonObject json = command.toJson(); @@ -284,11 +278,9 @@ public void abortCommandSerializationRoundTrip() { assertThat(deserialized.getType()).isEqualTo("connectivity.commands:migrateEncryptionAbort"); } - // --- MigrateConnectionEncryptionStatus command tests --- - @Test public void statusCommandSerializationRoundTrip() { - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var command = MigrateConnectionEncryptionStatus.of(headers); final JsonObject json = command.toJson(); @@ -298,11 +290,9 @@ public void statusCommandSerializationRoundTrip() { assertThat(deserialized.getType()).isEqualTo("connectivity.commands:migrateEncryptionStatus"); } - // --- Response tests --- - @Test public void acceptedResponseSerializationRoundTrip() { - final var headers = 
org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var response = MigrateConnectionEncryptionResponse.accepted( false, "2026-02-16T10:00:00Z", false, headers); @@ -318,7 +308,7 @@ public void acceptedResponseSerializationRoundTrip() { @Test public void dryRunResponseSerializationRoundTrip() { - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var response = MigrateConnectionEncryptionResponse.dryRunCompleted( "completed", false, "2026-02-16T10:00:00Z", 100, 10, 2, 200, 20, 5, headers); @@ -337,17 +327,16 @@ public void dryRunResponseSerializationRoundTrip() { @Test public void statusResponseSerializationRoundTrip() { - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); - final var response = MigrateConnectionEncryptionStatusResponse.of( + final var headers = DittoHeaders.empty(); + final var progress = new MigrationProgress( "in_progress:snapshots", - 150, 10, 2, - 0, 0, 0, "507f1f77bcf86cd799439011", "connection:mqtt-prod-sensor-01", null, null, - "2026-02-16T10:00:00Z", "2026-02-16T10:30:00Z", - true, - true, - headers); + 150, 10, 2, + 0, 0, 0, + "2026-02-16T10:00:00Z"); + final var response = MigrateConnectionEncryptionStatusResponse.of( + "in_progress:snapshots", progress, true, true, headers); final JsonObject json = response.toJson(); final var deserialized = MigrateConnectionEncryptionStatusResponse.fromJson(json, headers); @@ -359,7 +348,7 @@ public void statusResponseSerializationRoundTrip() { @Test public void abortResponseSerializationRoundTrip() { - final var headers = org.eclipse.ditto.base.model.headers.DittoHeaders.empty(); + final var headers = DittoHeaders.empty(); final var response = MigrateConnectionEncryptionAbortResponse.of( "aborted:snapshots", 150, 10, 2, @@ -375,32 +364,86 @@ public void abortResponseSerializationRoundTrip() { 
assertThat(deserialized.getType()).isEqualTo(MigrateConnectionEncryptionAbortResponse.TYPE); } - // --- Progress tracking tests --- - @Test public void migrationProgressTracking() { - final EncryptionMigrationActor.MigrationProgress progress = - new EncryptionMigrationActor.MigrationProgress(); + final MigrationProgress initial = + new MigrationProgress(); - progress.incrementSnapshotsProcessed() + final MigrationProgress afterSnapshots = initial + .incrementSnapshotsProcessed() .incrementSnapshotsProcessed() .incrementSnapshotsSkipped() .incrementSnapshotsFailed(); - assertThat(progress.snapshotsProcessed).isEqualTo(2); - assertThat(progress.snapshotsSkipped).isEqualTo(1); - assertThat(progress.snapshotsFailed).isEqualTo(1); + assertThat(afterSnapshots.snapshotsProcessed()).isEqualTo(2); + assertThat(afterSnapshots.snapshotsSkipped()).isEqualTo(1); + assertThat(afterSnapshots.snapshotsFailed()).isEqualTo(1); + // Original should be unchanged (immutable) + assertThat(initial.snapshotsProcessed()).isEqualTo(0); - progress.withPhase("journal") + final MigrationProgress afterJournal = afterSnapshots + .withPhase("journal") .incrementJournalProcessed() .incrementJournalSkipped(); - assertThat(progress.phase).isEqualTo("journal"); - assertThat(progress.journalProcessed).isEqualTo(1); - assertThat(progress.journalSkipped).isEqualTo(1); + assertThat(afterJournal.phase()).isEqualTo("journal"); + assertThat(afterJournal.journalProcessed()).isEqualTo(1); + assertThat(afterJournal.journalSkipped()).isEqualTo(1); + // Snapshot counts should be preserved + assertThat(afterJournal.snapshotsProcessed()).isEqualTo(2); + } + + @Test + public void bulkWriteFailureAdjustsSnapshotCounters() { + final MigrationProgress progress = + new MigrationProgress() + .incrementSnapshotsProcessed() + .incrementSnapshotsProcessed() + .incrementSnapshotsProcessed(); + + assertThat(progress.snapshotsProcessed()).isEqualTo(3); + assertThat(progress.snapshotsFailed()).isEqualTo(0); + + final 
MigrationProgress adjusted = + progress.adjustForBulkWriteFailure(2, true); + + assertThat(adjusted.snapshotsProcessed()).isEqualTo(1); + assertThat(adjusted.snapshotsFailed()).isEqualTo(2); + // Journal counters should be unaffected + assertThat(adjusted.journalProcessed()).isEqualTo(0); + assertThat(adjusted.journalFailed()).isEqualTo(0); + } + + @Test + public void bulkWriteFailureAdjustsJournalCounters() { + final MigrationProgress progress = + new MigrationProgress() + .withPhase("journal") + .incrementJournalProcessed() + .incrementJournalProcessed(); + + final MigrationProgress adjusted = + progress.adjustForBulkWriteFailure(2, false); + + assertThat(adjusted.journalProcessed()).isEqualTo(0); + assertThat(adjusted.journalFailed()).isEqualTo(2); + // Snapshot counters should be unaffected + assertThat(adjusted.snapshotsProcessed()).isEqualTo(0); } - // --- Helper methods --- + @Test + public void bulkWriteFailureDoesNotGoNegative() { + final MigrationProgress progress = + new MigrationProgress() + .incrementSnapshotsProcessed(); + + // More failures than processed: should clamp to 0 + final MigrationProgress adjusted = + progress.adjustForBulkWriteFailure(5, true); + + assertThat(adjusted.snapshotsProcessed()).isEqualTo(0); + assertThat(adjusted.snapshotsFailed()).isEqualTo(5); + } private static JsonObject createPlainSnapshotJson() { return JsonFactory.newObjectBuilder() diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationDisableIT.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationDisableIT.java new file mode 100644 index 00000000000..8e29f5d3797 --- /dev/null +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationDisableIT.java @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) 
distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.eclipse.ditto.connectivity.service.messaging.persistence.EncryptionMigrationTestHelper.*; + +import java.util.UUID; + +import org.bson.Document; +import org.eclipse.ditto.json.JsonObject; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; + +/** + * System/integration test for encryption migration in disable-encryption mode. + *

+ * Mode: encryption OFF + old key present. + * Behavior: decrypts data encrypted with old key back to plaintext. + *

+ * Requires Docker containers running with the disable-encryption docker-compose override: + *

+ *   docker-compose -f docker-compose.yml -f docker-compose-disable-encryption.yml up -d
+ *   mvn verify -pl connectivity/service -Dtest=EncryptionMigrationDisableIT -Dskip.npm -DfailIfNoTests=false
+ * 
+ */ +public final class EncryptionMigrationDisableIT { + + private static final String GATEWAY_URL = System.getProperty("ditto.gateway.url", "http://localhost:8080"); + private static final String MONGODB_HOST = System.getProperty("ditto.mongodb.host", "localhost"); + private static final int MONGODB_PORT = Integer.getInteger("ditto.mongodb.port", 27017); + + /** + * The old key that was used to encrypt data (must match + * CONNECTIVITY_CONNECTION_OLD_ENCRYPTION_KEY in docker-compose-disable-encryption.yml). + */ + private static final String OLD_KEY = TEST_KEY; + + private MongoClient mongoClient; + private MongoDatabase database; + private MongoCollection snapshotCollection; + + @Before + public void setUp() { + mongoClient = createMongoClient(MONGODB_HOST, MONGODB_PORT); + database = getDatabase(mongoClient); + snapshotCollection = database.getCollection(SNAPSHOT_COLLECTION); + } + + @After + public void tearDown() { + if (mongoClient != null) { + mongoClient.close(); + } + } + + /** + * Tests that migration decrypts encrypted data back to plaintext when running in disable mode. + *

+ * Scenario: Data was previously encrypted. Encryption is now disabled with the old key + * configured for reading. Migration should decrypt all encrypted fields to plaintext. + */ + @Test + public void migrationDecryptsToPlaintext() throws Exception { + // Given: a snapshot with fields encrypted using the old key + final String connId = "disable-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + // Encrypt with old key — simulates data encrypted before encryption was disabled + final JsonObject encryptedData = encryptFields(plainData, "", DEFAULT_POINTERS, OLD_KEY); + insertTestSnapshot(pid, encryptedData); + + // Verify data is currently encrypted + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + assertThat(snapshotBefore).isNotNull(); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + assertThat(isEncryptedValue(jsonBefore.getValue("/credentials/password").get().asString())).isTrue(); + assertThat(isEncryptedValue(jsonBefore.getValue("/uri").get().asString())).isTrue(); + + // When: trigger migration + postMigrationPiggyback(GATEWAY_URL, false, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should now be plaintext + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + assertThat(snapshotAfter).isNotNull(); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + + // Fields should be plaintext (no encrypted_ prefix) + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + assertThat(isPlaintextValue(pwdAfter)).isTrue(); + assertThat(pwdAfter).isEqualTo("mySecretPassword"); + + final String uriAfter = jsonAfter.getValue("/uri").get().asString(); + assertThat(isPlaintextValue(uriAfter)).isTrue(); + assertThat(uriAfter).isEqualTo("tcp://user:secretPassword123@broker.example.com:1883"); + } + + /** + * Tests that 
migration skips data that is already plaintext. + *

+ * Scenario: Some connections were already stored as plaintext (e.g., created after + * encryption was disabled). Migration should detect these and skip them. + */ + @Test + public void migrationSkipsAlreadyPlaintextData() throws Exception { + // Given: a snapshot with plaintext fields + final String connId = "disable-skip-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + // Insert as plaintext + insertTestSnapshot(pid, plainData); + + // Capture state before + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String pwdBefore = jsonBefore.getValue("/credentials/password").get().asString(); + assertThat(isPlaintextValue(pwdBefore)).isTrue(); + + // When: trigger migration + postMigrationPiggyback(GATEWAY_URL, false, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should be unchanged (skipped) + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + + assertThat(pwdAfter).isEqualTo(pwdBefore); + assertThat(isPlaintextValue(pwdAfter)).isTrue(); + } + + /** + * Tests that dry-run mode counts documents but does not decrypt them. 
+ */ + @Test + public void dryRunDoesNotDecryptData() throws Exception { + // Given: a snapshot encrypted with old key + final String connId = "disable-dryrun-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + final JsonObject encryptedData = encryptFields(plainData, "", DEFAULT_POINTERS, OLD_KEY); + insertTestSnapshot(pid, encryptedData); + + // Capture encrypted values before + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String pwdBefore = jsonBefore.getValue("/credentials/password").get().asString(); + assertThat(isEncryptedValue(pwdBefore)).isTrue(); + + // When: run dry-run migration + postMigrationPiggyback(GATEWAY_URL, true, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should still be encrypted + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + + // Should be identical (dry-run, no modifications) + assertThat(pwdAfter).isEqualTo(pwdBefore); + assertThat(isEncryptedValue(pwdAfter)).isTrue(); + + // Should still be decryptable with old key + final JsonObject decrypted = decryptFields(jsonAfter, "", DEFAULT_POINTERS, OLD_KEY); + assertThat(decrypted.getValue("/credentials/password").get().asString()) + .isEqualTo("mySecretPassword"); + } + + // --- Test data helpers --- + + private void insertTestSnapshot(final String pid, final JsonObject snapshotJson) throws Exception { + final org.bson.BsonDocument bsonData = org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson + .getInstance().parse(snapshotJson); + final Document s2 = Document.parse(bsonData.toJson()); + + final Document snapshotDoc = new Document() + .append("pid", pid) + 
.append("sn", 1L) + .append("ts", System.currentTimeMillis()) + .append(SNAPSHOT_SERIALIZED_FIELD, s2); + + blockFirst(snapshotCollection.insertOne(snapshotDoc)); + } +} diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationIT.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationIT.java new file mode 100644 index 00000000000..98304d65027 --- /dev/null +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationIT.java @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.eclipse.ditto.connectivity.service.messaging.persistence.EncryptionMigrationTestHelper.*; + +import java.util.UUID; + +import org.bson.Document; +import org.eclipse.ditto.json.JsonObject; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; + +/** + * System/integration test for encryption migration in key-rotation mode. + *

+ * Mode: encryption ON + both keys set (old + new). + * Behavior: decrypt with old key, re-encrypt with new key. + *

+ * Requires Docker containers running with the default docker-compose configuration + * that includes both {@code CONNECTIVITY_CONNECTION_ENCRYPTION_KEY} (new key) and + * {@code CONNECTIVITY_CONNECTION_OLD_ENCRYPTION_KEY} (old key). + *

+ * Run with: + *

+ *   docker-compose -f docker-compose.yml -f docker-compose-key-rotation.yml up -d
+ *   mvn verify -pl connectivity/service -Dtest=EncryptionMigrationIT -Dskip.npm -DfailIfNoTests=false
+ * 
+ */ +public final class EncryptionMigrationIT { + + private static final String GATEWAY_URL = System.getProperty("ditto.gateway.url", "http://localhost:8080"); + private static final String MONGODB_HOST = System.getProperty("ditto.mongodb.host", "localhost"); + private static final int MONGODB_PORT = Integer.getInteger("ditto.mongodb.port", 27017); + + /** The old key used to encrypt existing data. */ + private static final String OLD_KEY = TEST_KEY; + + /** The new key for re-encryption (must match docker-compose key-rotation config). */ + private static final String NEW_KEY = "anotherBase64Key256bitsLongForAESGCM32Bytes="; + + private MongoClient mongoClient; + private MongoDatabase database; + private MongoCollection snapshotCollection; + private MongoCollection journalCollection; + + @Before + public void setUp() { + mongoClient = createMongoClient(MONGODB_HOST, MONGODB_PORT); + database = getDatabase(mongoClient); + snapshotCollection = database.getCollection(SNAPSHOT_COLLECTION); + journalCollection = database.getCollection(JOURNAL_COLLECTION); + } + + @After + public void tearDown() { + if (mongoClient != null) { + mongoClient.close(); + } + } + + @Test + public void migrationReEncryptsDataFromOldKeyToNewKey() throws Exception { + // Given: a snapshot with fields encrypted using the old key + final String connId = "key-rotation-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + // Encrypt with old key and insert as a snapshot + final JsonObject encryptedWithOldKey = encryptFields(plainData, "", DEFAULT_POINTERS, OLD_KEY); + insertTestSnapshot(pid, encryptedWithOldKey); + + // Verify it's encrypted with old key + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + assertThat(snapshotBefore).isNotNull(); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String uriBefore = 
jsonBefore.getValue("/uri").get().asString(); + assertThat(isEncryptedValue(uriBefore)).isTrue(); + + // Verify it can be decrypted with old key + final JsonObject decryptedWithOld = decryptFields(jsonBefore, "", DEFAULT_POINTERS, OLD_KEY); + assertThat(decryptedWithOld.getValue("/credentials/password").get().asString()) + .isEqualTo("mySecretPassword"); + + // When: trigger migration + postMigrationPiggyback(GATEWAY_URL, false, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should be re-encrypted with new key + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + assertThat(snapshotAfter).isNotNull(); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + + // Should still be encrypted + final String uriAfter = jsonAfter.getValue("/uri").get().asString(); + assertThat(isEncryptedValue(uriAfter)).isTrue(); + + // Should be decryptable with NEW key (not old) + final JsonObject decryptedWithNew = decryptFields(jsonAfter, "", DEFAULT_POINTERS, NEW_KEY); + assertThat(decryptedWithNew.getValue("/credentials/password").get().asString()) + .isEqualTo("mySecretPassword"); + assertThat(decryptedWithNew.getValue("/uri").get().asString()) + .contains("secretPassword123"); + } + + @Test + public void migrationSkipsAlreadyReEncryptedData() throws Exception { + // Given: a snapshot already encrypted with the new key + final String connId = "skip-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + final JsonObject encryptedWithNewKey = encryptFields(plainData, "", DEFAULT_POINTERS, NEW_KEY); + insertTestSnapshot(pid, encryptedWithNewKey); + + // Capture the encrypted values before migration + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String pwdBefore = 
jsonBefore.getValue("/credentials/password").get().asString(); + + // When: trigger migration + postMigrationPiggyback(GATEWAY_URL, false, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should be unchanged (skipped) + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + + // The encrypted value might differ due to random IV, but it should still + // be decryptable with the new key. Since it was skipped, it should be identical. + assertThat(pwdAfter).isEqualTo(pwdBefore); + } + + @Test + public void dryRunDoesNotModifyData() throws Exception { + // Given: a snapshot encrypted with old key + final String connId = "dryrun-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + final JsonObject encryptedWithOldKey = encryptFields(plainData, "", DEFAULT_POINTERS, OLD_KEY); + insertTestSnapshot(pid, encryptedWithOldKey); + + // Capture state before + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String pwdBefore = jsonBefore.getValue("/credentials/password").get().asString(); + + // When: run dry-run migration + postMigrationPiggyback(GATEWAY_URL, true, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should be unchanged + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + + assertThat(pwdAfter).isEqualTo(pwdBefore); + + // Should still be decryptable with OLD key (not re-encrypted) + final JsonObject decrypted = decryptFields(jsonAfter, "", DEFAULT_POINTERS, OLD_KEY); 
+ assertThat(decrypted.getValue("/credentials/password").get().asString()) + .isEqualTo("mySecretPassword"); + } + + // --- Test data helpers --- + + private void insertTestSnapshot(final String pid, final JsonObject snapshotJson) throws Exception { + final org.bson.BsonDocument bsonData = org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson + .getInstance().parse(snapshotJson); + final Document s2 = Document.parse(bsonData.toJson()); + + final Document snapshotDoc = new Document() + .append("pid", pid) + .append("sn", 1L) + .append("ts", System.currentTimeMillis()) + .append(SNAPSHOT_SERIALIZED_FIELD, s2); + + blockFirst(snapshotCollection.insertOne(snapshotDoc)); + } +} diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationInitialEncryptionIT.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationInitialEncryptionIT.java new file mode 100644 index 00000000000..8f8bc625ef2 --- /dev/null +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationInitialEncryptionIT.java @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.eclipse.ditto.connectivity.service.messaging.persistence.EncryptionMigrationTestHelper.*; + +import java.util.UUID; + +import org.bson.Document; +import org.eclipse.ditto.json.JsonObject; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; + +/** + * System/integration test for encryption migration in initial-encryption mode. + *

+ * Mode: encryption ON + current key only (no old key). + * Behavior: encrypts plaintext data with the current key. + *

+ * Requires Docker containers running with the initial-encryption docker-compose override: + *

+ *   docker-compose -f docker-compose.yml -f docker-compose-initial-encryption.yml up -d
+ *   mvn verify -pl connectivity/service -Dtest=EncryptionMigrationInitialEncryptionIT -Dskip.npm -DfailIfNoTests=false
+ * 
+ */ +public final class EncryptionMigrationInitialEncryptionIT { + + private static final String GATEWAY_URL = System.getProperty("ditto.gateway.url", "http://localhost:8080"); + private static final String MONGODB_HOST = System.getProperty("ditto.mongodb.host", "localhost"); + private static final int MONGODB_PORT = Integer.getInteger("ditto.mongodb.port", 27017); + + /** The current encryption key (must match docker-compose-initial-encryption.yml). */ + private static final String CURRENT_KEY = TEST_KEY; + + private MongoClient mongoClient; + private MongoDatabase database; + private MongoCollection snapshotCollection; + + @Before + public void setUp() { + mongoClient = createMongoClient(MONGODB_HOST, MONGODB_PORT); + database = getDatabase(mongoClient); + snapshotCollection = database.getCollection(SNAPSHOT_COLLECTION); + } + + @After + public void tearDown() { + if (mongoClient != null) { + mongoClient.close(); + } + } + + /** + * Tests that migration encrypts plaintext snapshot data when running in initial-encryption mode. + *

+ * Scenario: Data was stored before encryption was enabled (plaintext in MongoDB). + * After migration, all sensitive fields should be encrypted with the current key. + */ + @Test + public void migrationEncryptsPlaintextData() throws Exception { + // Given: a snapshot with plaintext (unencrypted) fields + final String connId = "init-enc-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + // Insert as plaintext — simulates data stored before encryption was enabled + insertTestSnapshot(pid, plainData); + + // Verify data is currently plaintext + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + assertThat(snapshotBefore).isNotNull(); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + assertThat(isPlaintextValue(jsonBefore.getValue("/credentials/password").get().asString())).isTrue(); + assertThat(isPlaintextValue(jsonBefore.getValue("/uri").get().asString())).isTrue(); + + // When: trigger migration + postMigrationPiggyback(GATEWAY_URL, false, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should now be encrypted with current key + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + assertThat(snapshotAfter).isNotNull(); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + + // Fields should be encrypted + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + assertThat(isEncryptedValue(pwdAfter)).isTrue(); + + final String uriAfter = jsonAfter.getValue("/uri").get().asString(); + assertThat(isEncryptedValue(uriAfter)).isTrue(); + + // Should be decryptable with current key + final JsonObject decrypted = decryptFields(jsonAfter, "", DEFAULT_POINTERS, CURRENT_KEY); + assertThat(decrypted.getValue("/credentials/password").get().asString()) + .isEqualTo("mySecretPassword"); + 
assertThat(decrypted.getValue("/uri").get().asString()) + .contains("secretPassword123"); + } + + /** + * Tests that migration skips data already encrypted with the current key. + *

+ * Scenario: Some connections were already encrypted (e.g., created after encryption was enabled). + * Migration should detect these and skip them. + */ + @Test + public void migrationSkipsAlreadyEncryptedData() throws Exception { + // Given: a snapshot already encrypted with the current key + final String connId = "init-skip-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + + // Encrypt with current key before inserting + final JsonObject encryptedData = encryptFields(plainData, "", DEFAULT_POINTERS, CURRENT_KEY); + insertTestSnapshot(pid, encryptedData); + + // Capture encrypted values before migration + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String pwdBefore = jsonBefore.getValue("/credentials/password").get().asString(); + + // When: trigger migration + postMigrationPiggyback(GATEWAY_URL, false, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should be unchanged (skipped) + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + + // Encrypted value should be identical (was skipped, not re-encrypted) + assertThat(pwdAfter).isEqualTo(pwdBefore); + } + + /** + * Tests that dry-run mode counts documents but does not encrypt them. 
+ */ + @Test + public void dryRunDoesNotEncryptData() throws Exception { + // Given: a snapshot with plaintext fields + final String connId = "init-dryrun-test-" + UUID.randomUUID().toString().substring(0, 8); + final String pid = "connection:" + connId; + final JsonObject plainData = createTestConnectionJson(connId); + insertTestSnapshot(pid, plainData); + + // Capture state before + final Document snapshotBefore = findSnapshot(snapshotCollection, pid); + final JsonObject jsonBefore = extractSnapshotJson(snapshotBefore); + final String pwdBefore = jsonBefore.getValue("/credentials/password").get().asString(); + assertThat(isPlaintextValue(pwdBefore)).isTrue(); + + // When: run dry-run migration + postMigrationPiggyback(GATEWAY_URL, true, false); + waitForMigrationCompleted(GATEWAY_URL, 60); + + // Then: data should still be plaintext + final Document snapshotAfter = findSnapshot(snapshotCollection, pid); + final JsonObject jsonAfter = extractSnapshotJson(snapshotAfter); + final String pwdAfter = jsonAfter.getValue("/credentials/password").get().asString(); + + assertThat(pwdAfter).isEqualTo(pwdBefore); + assertThat(isPlaintextValue(pwdAfter)).isTrue(); + } + + // --- Test data helpers --- + + private void insertTestSnapshot(final String pid, final JsonObject snapshotJson) throws Exception { + final org.bson.BsonDocument bsonData = org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson + .getInstance().parse(snapshotJson); + final Document s2 = Document.parse(bsonData.toJson()); + + final Document snapshotDoc = new Document() + .append("pid", pid) + .append("sn", 1L) + .append("ts", System.currentTimeMillis()) + .append(SNAPSHOT_SERIALIZED_FIELD, s2); + + blockFirst(snapshotCollection.insertOne(snapshotDoc)); + } +} diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationTestHelper.java 
b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationTestHelper.java new file mode 100644 index 00000000000..df38b568e38 --- /dev/null +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/EncryptionMigrationTestHelper.java @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence; + +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.time.Duration; +import java.util.Base64; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.bson.BsonDocument; +import org.bson.Document; +import org.eclipse.ditto.internal.utils.persistence.mongo.DittoBsonJson; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonObject; +import org.reactivestreams.Publisher; + +import com.mongodb.client.model.Filters; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; + +/** + * Shared test utilities for encryption migration system/integration tests. + *

+ * Provides crypto helpers, MongoDB access, snapshot manipulation, and piggyback command + * utilities used by all three migration mode test classes. + */ +public final class EncryptionMigrationTestHelper { + + /** Test encryption key (same as in connection-fields-encryption-test.conf). */ + public static final String TEST_KEY = "vJFSTPE9PO2BtZlcMAwNjs8jdFvQCk0Ya9MVdYjRJUU="; + + /** MongoDB collection name for connection snapshots. */ + public static final String SNAPSHOT_COLLECTION = "connection_snaps"; + + /** MongoDB collection name for connection journal events. */ + public static final String JOURNAL_COLLECTION = "connection_journal"; + + /** MongoDB collection name for migration progress tracking. */ + public static final String PROGRESS_COLLECTION = "connection_encryption_migration"; + + /** BSON field containing the serialized snapshot data. */ + public static final String SNAPSHOT_SERIALIZED_FIELD = "s2"; + + /** BSON field containing the journal events array. */ + public static final String JOURNAL_EVENTS_FIELD = "events"; + + /** BSON field containing the journal event payload. */ + public static final String JOURNAL_PAYLOAD_FIELD = "p"; + + /** Default JSON pointers for encryption. */ + public static final List DEFAULT_POINTERS = List.of("/uri", "/credentials/password"); + + /** Prefix used by JsonFieldsEncryptor for encrypted values. */ + public static final String ENCRYPTED_PREFIX = "encrypted_"; + + private static final String DEVOPS_AUTH = "Basic " + + Base64.getEncoder().encodeToString("devops:devopsPw1!".getBytes()); + + private EncryptionMigrationTestHelper() { + // Utility class + } + + // --- MongoDB access --- + + /** + * Creates a MongoDB client connecting to the default Docker-exposed port. + * + * @return the MongoDB client + */ + public static MongoClient createMongoClient() { + return createMongoClient("localhost", 27017); + } + + /** + * Creates a MongoDB client for a specific host and port. 
+ * + * @param host the MongoDB host + * @param port the MongoDB port + * @return the MongoDB client + */ + public static MongoClient createMongoClient(final String host, final int port) { + return MongoClients.create("mongodb://" + host + ":" + port); + } + + /** + * Gets the default Ditto database. + * + * @param client the MongoDB client + * @return the database instance + */ + public static MongoDatabase getDatabase(final MongoClient client) { + return client.getDatabase("connectivity"); + } + + /** + * Finds a snapshot document by connection PID. + * + * @param snapshotCollection the snapshot collection + * @param connectionPid the connection persistence ID (e.g., "connection:my-conn-id") + * @return the snapshot document, or null if not found + */ + public static Document findSnapshot(final MongoCollection snapshotCollection, + final String connectionPid) throws Exception { + return blockFirst(snapshotCollection.find(Filters.eq("pid", connectionPid)).first()); + } + + /** + * Finds a journal document by connection PID. + * + * @param journalCollection the journal collection + * @param connectionPid the connection persistence ID + * @return the journal document, or null if not found + */ + public static Document findFirstJournalEntry(final MongoCollection journalCollection, + final String connectionPid) throws Exception { + return blockFirst(journalCollection.find(Filters.eq("pid", connectionPid)).first()); + } + + // --- Snapshot data extraction --- + + /** + * Extracts the serialized snapshot JSON object from a snapshot document. 
+ * + * @param snapshotDoc the MongoDB snapshot document + * @return the JSON object from the s2 field + */ + public static JsonObject extractSnapshotJson(final Document snapshotDoc) { + final Document s2 = snapshotDoc.get(SNAPSHOT_SERIALIZED_FIELD, Document.class); + final BsonDocument bsonDoc = s2.toBsonDocument(Document.class, + com.mongodb.MongoClientSettings.getDefaultCodecRegistry()); + return DittoBsonJson.getInstance().serialize(bsonDoc); + } + + /** + * Replaces the serialized snapshot JSON in a snapshot document and writes it back to MongoDB. + * + * @param snapshotCollection the snapshot collection + * @param snapshotDoc the original snapshot document + * @param newJson the new JSON to store in the s2 field + */ + public static void replaceSnapshotJson(final MongoCollection snapshotCollection, + final Document snapshotDoc, final JsonObject newJson) throws Exception { + final BsonDocument newBson = DittoBsonJson.getInstance().parse(newJson); + snapshotDoc.put(SNAPSHOT_SERIALIZED_FIELD, Document.parse(newBson.toJson())); + blockFirst(snapshotCollection.replaceOne( + Filters.eq("_id", snapshotDoc.get("_id")), + snapshotDoc)); + } + + // --- Crypto utilities --- + + /** + * Encrypts JSON fields using the same algorithm as the connectivity service. + * + * @param json the JSON object with plaintext fields + * @param entityTypePrefix the entity prefix ("" for snapshots, "connection" for journal) + * @param pointers the JSON pointers to encrypt + * @param key the encryption key + * @return the JSON object with encrypted fields + */ + public static JsonObject encryptFields(final JsonObject json, final String entityTypePrefix, + final List pointers, final String key) { + return JsonFieldsEncryptor.encrypt(json, entityTypePrefix, pointers, key); + } + + /** + * Decrypts JSON fields using the same algorithm as the connectivity service. 
+     *
+     * @param json the JSON object with encrypted fields
+     * @param entityTypePrefix the entity prefix ("" for snapshots, "connection" for journal)
+     * @param pointers the JSON pointers to decrypt
+     * @param key the decryption key
+     * @return the JSON object with decrypted fields
+     */
+    public static JsonObject decryptFields(final JsonObject json, final String entityTypePrefix,
+            final List<String> pointers, final String key) {
+        return JsonFieldsEncryptor.decrypt(json, entityTypePrefix, pointers, key);
+    }
+
+    /**
+     * Checks whether a field value is encrypted (direct {@code encrypted_} prefix, or a URI whose password starts with it).
+     *
+     * @param value the string value to check
+     * @return true if the value is encrypted
+     */
+    public static boolean isEncryptedValue(final String value) {
+        if (value.startsWith(ENCRYPTED_PREFIX)) {
+            return true;
+        }
+        // Check for encrypted password in URI
+        try {
+            final java.net.URI uri = new java.net.URI(value);
+            if (uri.getScheme() != null && uri.getRawUserInfo() != null) {
+                final String[] userPass = uri.getRawUserInfo().split(":", 2);
+                return userPass.length == 2 && userPass[1].startsWith(ENCRYPTED_PREFIX);
+            }
+        } catch (final Exception ignored) {
+            // Not a URI
+        }
+        return false;
+    }
+
+    /**
+     * Checks whether a field value is plaintext (not encrypted).
+     *
+     * @param value the string value to check
+     * @return true if the value is plaintext
+     */
+    public static boolean isPlaintextValue(final String value) {
+        return !isEncryptedValue(value);
+    }
+
+    // --- Piggyback command helpers ---
+
+    /**
+     * Sends a migration piggyback command to the connectivity service.
+ * + * @param gatewayBaseUrl the gateway URL (e.g., "http://localhost:8080") + * @param dryRun whether to run in dry-run mode + * @param resume whether to resume a previous migration + * @return the HTTP response body as a string + */ + public static String postMigrationPiggyback(final String gatewayBaseUrl, final boolean dryRun, + final boolean resume) throws Exception { + final String body = """ + { + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryption", + "dryRun": %s, + "resume": %s + } + }""".formatted(dryRun, resume); + + return postPiggyback(gatewayBaseUrl, body); + } + + /** + * Sends a migration status piggyback command to the connectivity service. + * + * @param gatewayBaseUrl the gateway URL + * @return the HTTP response body as a string + */ + public static String postMigrationStatusPiggyback(final String gatewayBaseUrl) throws Exception { + final String body = """ + { + "targetActorSelection": "/user/connectivityRoot/encryptionMigration", + "headers": { + "aggregate": false + }, + "piggybackCommand": { + "type": "connectivity.commands:migrateEncryptionStatus" + } + }"""; + + return postPiggyback(gatewayBaseUrl, body); + } + + /** + * Waits for a migration to complete by polling the status endpoint. 
+ * + * @param gatewayBaseUrl the gateway URL + * @param timeoutSeconds maximum time to wait + * @return the final status response body + */ + public static String waitForMigrationCompleted(final String gatewayBaseUrl, + final int timeoutSeconds) throws Exception { + final long deadline = System.currentTimeMillis() + (timeoutSeconds * 1000L); + + while (System.currentTimeMillis() < deadline) { + final String status = postMigrationStatusPiggyback(gatewayBaseUrl); + final JsonObject statusJson = JsonFactory.newObject(status); + + // Check connectivity-specific response (unwrap if aggregated) + final JsonObject effectiveResponse = unwrapAggregatedResponse(statusJson); + + final String phase = effectiveResponse.getValue("phase") + .map(v -> v.asString()) + .orElse(""); + + if ("completed".equals(phase)) { + return status; + } + + final boolean active = effectiveResponse.getValue("migrationActive") + .map(v -> v.asBoolean()) + .orElse(false); + + if (!active && !phase.startsWith("in_progress")) { + // Migration ended (possibly with error) + return status; + } + + TimeUnit.SECONDS.sleep(1); + } + + throw new AssertionError("Migration did not complete within " + timeoutSeconds + " seconds"); + } + + /** + * Creates a simple test connection JSON for testing purposes. 
+ * + * @param connectionId the connection ID + * @return a JSON object representing a minimal connection with sensitive fields + */ + public static JsonObject createTestConnectionJson(final String connectionId) { + return JsonFactory.newObjectBuilder() + .set("id", connectionId) + .set("name", "test-connection-" + connectionId) + .set("connectionType", "mqtt") + .set("connectionStatus", "closed") + .set("uri", "tcp://user:secretPassword123@broker.example.com:1883") + .set("credentials/password", "mySecretPassword") + .build(); + } + + // --- Internal helpers --- + + private static String postPiggyback(final String gatewayBaseUrl, final String body) + throws Exception { + final HttpClient httpClient = HttpClient.newBuilder() + .connectTimeout(Duration.ofSeconds(10)) + .build(); + + final HttpRequest request = HttpRequest.newBuilder() + .uri(URI.create(gatewayBaseUrl + "/devops/piggyback/connectivity")) + .header("Content-Type", "application/json") + .header("Authorization", DEVOPS_AUTH) + .POST(HttpRequest.BodyPublishers.ofString(body)) + .timeout(Duration.ofSeconds(30)) + .build(); + + final HttpResponse response = httpClient.send(request, + HttpResponse.BodyHandlers.ofString()); + + return response.body(); + } + + private static JsonObject unwrapAggregatedResponse(final JsonObject response) { + // If the response is aggregated (contains "connectivity" key with nested response), + // unwrap it. Otherwise, return as-is. + return response.getValue("connectivity") + .filter(v -> v.isObject()) + .map(v -> v.asObject()) + .orElse(response); + } + + /** + * Blocks on a reactive publisher and returns the first element. 
+ * + * @param publisher the reactive publisher + * @param the element type + * @return the first element, or null if empty + */ + @SuppressWarnings("unchecked") + public static T blockFirst(final Publisher publisher) throws Exception { + final java.util.concurrent.CompletableFuture future = new java.util.concurrent.CompletableFuture<>(); + publisher.subscribe(new org.reactivestreams.Subscriber() { + @Override + public void onSubscribe(final org.reactivestreams.Subscription s) { + s.request(1); + } + + @Override + public void onNext(final T t) { + future.complete(t); + } + + @Override + public void onError(final Throwable t) { + future.completeExceptionally(t); + } + + @Override + public void onComplete() { + if (!future.isDone()) { + future.complete(null); + } + } + }); + return future.get(10, TimeUnit.SECONDS); + } + + /** + * Blocks on a reactive publisher and waits for completion. + * + * @param publisher the reactive publisher + */ + public static void blockComplete(final Publisher publisher) throws Exception { + blockFirst(publisher); + } +} diff --git a/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessorTest.java b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessorTest.java new file mode 100644 index 00000000000..ce0e0415833 --- /dev/null +++ b/connectivity/service/src/test/java/org/eclipse/ditto/connectivity/service/messaging/persistence/migration/DocumentProcessorTest.java @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2026 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.eclipse.ditto.connectivity.service.messaging.persistence.migration; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Optional; + +import org.eclipse.ditto.connectivity.service.messaging.persistence.JsonFieldsEncryptor; +import org.eclipse.ditto.connectivity.service.util.EncryptorAesGcm; +import org.eclipse.ditto.json.JsonFactory; +import org.eclipse.ditto.json.JsonObject; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests for {@link DocumentProcessor} encryption migration logic. + */ +public final class DocumentProcessorTest { + + private static String OLD_KEY; + private static String NEW_KEY; + + private static final List POINTERS = List.of( + "/uri", + "/credentials/password" + ); + + @BeforeClass + public static void initKeys() throws NoSuchAlgorithmException { + OLD_KEY = EncryptorAesGcm.generateAESKeyAsString(); + NEW_KEY = EncryptorAesGcm.generateAESKeyAsString(); + } + + // --- Re-encryption logic tests --- + + @Test + public void reEncryptSnapshotFieldsFromOldKeyToNewKey() { + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithOldKey, context); + + assertThat(result).isPresent(); + final JsonObject decryptedWithNewKey = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); + assertThat(decryptedWithNewKey.getValue("/credentials/password")) + .contains(plain.getValue("/credentials/password").get()); + } + + @Test + 
public void reEncryptJournalFieldsFromOldKeyToNewKey() { + final JsonObject plain = createPlainJournalJson(); + final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt( + plain, "connection", POINTERS, OLD_KEY); + + final MigrationContext context = MigrationContext.forJournal(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithOldKey, context); + + assertThat(result).isPresent(); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "connection", POINTERS, NEW_KEY); + assertThat(decrypted.getValue("/connection/credentials/password")) + .contains(plain.getValue("/connection/credentials/password").get()); + } + + @Test + public void skipAlreadyMigratedDocument() { + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject encryptedWithNewKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithNewKey, context); + + assertThat(result).isEmpty(); + } + + @Test + public void whenBothKeysFailTreatsAsPlaintextAndEncrypts() { + // When neither old nor new key can decrypt, the data is treated as plaintext + // and encrypted with the new key. This handles the case where data was stored + // before encryption was enabled. 
+ final JsonObject plain = createPlainSnapshotJson(); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); + + // Should encrypt the plaintext data with new key + assertThat(result).isPresent(); + + // Verify it can be decrypted with the new key + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); + assertThat(decrypted).isEqualTo(plain); + } + + @Test + public void initialEncryptionEncryptsPlaintext() { + // Initial encryption: oldKey is null, newKey is set + final JsonObject plain = createPlainSnapshotJson(); + + final MigrationContext context = MigrationContext.forSnapshots(null, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); + + assertThat(result).isPresent(); + // Verify it can be decrypted with the new key + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); + assertThat(decrypted).isEqualTo(plain); + } + + @Test + public void initialEncryptionSkipsAlreadyEncrypted() { + // Initial encryption: oldKey is null, data already encrypted with new key + final JsonObject plain = createPlainSnapshotJson(); + final JsonObject alreadyEncrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); + + final MigrationContext context = MigrationContext.forSnapshots(null, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(alreadyEncrypted, context); + + // Should skip - already encrypted + assertThat(result).isEmpty(); + } + + @Test + public void uriPasswordReEncryptedCorrectly() { + final JsonObject plain = JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") + .set("/credentials/password", "mypassword") + .build(); + final JsonObject encryptedWithOldKey = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); + + final String 
encryptedUri = encryptedWithOldKey.getValue("/uri").get().asString(); + assertThat(encryptedUri).contains("encrypted_"); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encryptedWithOldKey, context); + + assertThat(result).isPresent(); + final JsonObject decrypted = JsonFieldsEncryptor.decrypt(result.get(), "", POINTERS, NEW_KEY); + assertThat(decrypted.getValue("/uri").map(v -> v.asString())) + .hasValue("amqps://user:secretpassword@broker.example.com:5671"); + } + + @Test + public void plainTextFieldsNotAffected() { + final JsonObject plain = createPlainSnapshotJson(); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, NEW_KEY, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); + + assertThat(result).isPresent(); + final String encryptedPwd = result.get().getValue("/credentials/password").get().asString(); + assertThat(encryptedPwd).startsWith("encrypted_"); + } + + @Test + public void initialEncryptionSkipsUriWithAlreadyEncryptedPassword() { + // Bug 1: URI fields like amqps://user:encrypted_XXX@host were not detected + // as already encrypted because startsWith("encrypted_") checks the full URI string + final JsonObject plain = JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") + .set("/credentials/password", "mypassword") + .build(); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, NEW_KEY); + + // Verify the URI has encrypted password embedded (not a direct encrypted_ prefix) + final String encUri = encrypted.getValue("/uri").get().asString(); + assertThat(encUri).startsWith("amqps://"); + assertThat(encUri).contains("encrypted_"); + + // Initial encryption (oldKey=null) should detect the encrypted URI and skip + final MigrationContext context = MigrationContext.forSnapshots(null, NEW_KEY, POINTERS); + final 
Optional result = DocumentProcessor.reEncryptFields(encrypted, context); + + assertThat(result).isEmpty(); + } + + @Test + public void disableWorkflowSkipsAlreadyPlaintextEntity() { + // Bug 2: decrypt() silently passes through plaintext, so disable workflow + // was counting plaintext entities as "processed" instead of "skipped" + final JsonObject plain = createPlainSnapshotJson(); + + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, null, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(plain, context); + + // Should return empty (skip) because decrypt returns unchanged plaintext + assertThat(result).isEmpty(); + } + + @Test + public void disableWorkflowProcessesEntityWithEncryptedUriPassword() { + final JsonObject plain = JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:secretpassword@broker.example.com:5671") + .set("/credentials/password", "mypassword") + .build(); + final JsonObject encrypted = JsonFieldsEncryptor.encrypt(plain, "", POINTERS, OLD_KEY); + + // Disable workflow (newKey=null) should decrypt and return plaintext + final MigrationContext context = MigrationContext.forSnapshots(OLD_KEY, null, POINTERS); + final Optional result = DocumentProcessor.reEncryptFields(encrypted, context); + + assertThat(result).isPresent(); + assertThat(result.get().getValue("/uri").get().asString()) + .isEqualTo("amqps://user:secretpassword@broker.example.com:5671"); + assertThat(result.get().getValue("/credentials/password").get().asString()) + .isEqualTo("mypassword"); + } + + @Test + public void eventWithNoEncryptableFieldsIsSkipped() { + // Empty events or events without any of the configured pointers should be skipped + final JsonObject emptyEvent = JsonFactory.newObjectBuilder() + .set("/connection/type", "persistence-actor-internal:empty-event") + .set("/connection/effect", "priorityUpdate") + .build(); + + // Initial encryption + final MigrationContext initialContext = MigrationContext.forJournal(null, 
NEW_KEY, POINTERS); + assertThat(DocumentProcessor.reEncryptFields(emptyEvent, initialContext)).isEmpty(); + + // Key rotation + final MigrationContext rotationContext = MigrationContext.forJournal(OLD_KEY, NEW_KEY, POINTERS); + assertThat(DocumentProcessor.reEncryptFields(emptyEvent, rotationContext)).isEmpty(); + + // Disable workflow + final MigrationContext disableContext = MigrationContext.forJournal(OLD_KEY, null, POINTERS); + assertThat(DocumentProcessor.reEncryptFields(emptyEvent, disableContext)).isEmpty(); + } + + // --- Helper methods --- + + private static JsonObject createPlainSnapshotJson() { + return JsonFactory.newObjectBuilder() + .set("/uri", "amqps://user:password123@broker.example.com:5671") + .set("/credentials/password", "secretPassword") + .set("/name", "test-connection") + .build(); + } + + private static JsonObject createPlainJournalJson() { + return JsonFactory.newObjectBuilder() + .set("/connection/uri", "amqps://user:password123@broker.example.com:5671") + .set("/connection/credentials/password", "secretPassword") + .set("/connection/name", "test-connection") + .build(); + } +} diff --git a/deployment/helm/ditto/templates/connectivity-deployment.yaml b/deployment/helm/ditto/templates/connectivity-deployment.yaml index fed0fb9404c..59fcb74a807 100644 --- a/deployment/helm/ditto/templates/connectivity-deployment.yaml +++ b/deployment/helm/ditto/templates/connectivity-deployment.yaml @@ -350,6 +350,28 @@ spec: value: "{{ .Values.connectivity.config.connections.kafka.producer.parallelism }}" - name: PEKKO_HTTP_HOSTPOOL_MAX_CONNECTION_LIFETIME value: "{{ .Values.connectivity.config.connections.httpPush.maxConnectionLifetime }}" + {{- if .Values.connectivity.config.connections.encryption.enabled }} + {{- if not .Values.connectivity.config.connections.encryption.secretName }} + {{ fail "encryption.enabled is true but encryption.secretName is not set. Please create a Kubernetes Secret and reference it." 
}} + {{- end }} + - name: CONNECTIVITY_CONNECTION_ENCRYPTION_ENABLED + value: "true" + - name: CONNECTIVITY_CONNECTION_ENCRYPTION_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.connectivity.config.connections.encryption.secretName }} + key: current-key + - name: CONNECTIVITY_CONNECTION_OLD_ENCRYPTION_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.connectivity.config.connections.encryption.secretName }} + key: old-key + optional: true + {{- end }} + - name: CONNECTIVITY_ENCRYPTION_MIGRATION_BATCH_SIZE + value: "{{ .Values.connectivity.config.connections.encryption.migration.batchSize }}" + - name: CONNECTIVITY_ENCRYPTION_MIGRATION_MAX_DOCS_PER_MINUTE + value: "{{ .Values.connectivity.config.connections.encryption.migration.maxDocumentsPerMinute }}" {{- if .Values.connectivity.extraEnv }} {{- toYaml .Values.connectivity.extraEnv | nindent 12 }} {{- end }} diff --git a/deployment/helm/ditto/values.yaml b/deployment/helm/ditto/values.yaml index ab99794747e..18624b459ce 100644 --- a/deployment/helm/ditto/values.yaml +++ b/deployment/helm/ditto/values.yaml @@ -2170,6 +2170,31 @@ connectivity: queueSize: 1000 # Messages to publish in parallel per Kafka-Publisher (one per connectivity client) parallelism: 10 + # encryption contains configuration for encrypting sensitive connection fields + encryption: + # enabled controls whether field encryption is enabled + enabled: false + # secretName is the name of the Kubernetes Secret containing encryption keys + # The Secret must have the following keys: + # - current-key: The current encryption key (required when enabled=true) + # - old-key: The old encryption key (optional, only needed for key rotation) + # For production deployments, create the Secret before enabling encryption + secretName: "" + # migration contains configuration for the encryption migration process + migration: + # batchSize defines the number of documents processed per batch + # Higher values improve performance but increase memory usage + # Lower 
values are safer for initial deployments
+      #   Recommendation: Start with 10-20, increase to 50-100 after successful tests
+      batchSize: 10
+      # maxDocumentsPerMinute limits the migration throughput to protect the database
+      #   Protects MongoDB from excessive load during migration
+      #   Set to 0 to disable (not recommended for production)
+      #   Adjust based on cluster capacity:
+      #     Small (<100 connections): 200-500
+      #     Medium (100-1000 connections): 100-200
+      #     Large (>1000 connections): 50-100
+      maxDocumentsPerMinute: 200
 
 ## ----------------------------------------------------------------------------
 ## gateway configuration
diff --git a/documentation/src/main/resources/pages/ditto/installation-operating.md b/documentation/src/main/resources/pages/ditto/installation-operating.md
index 283c5995ca3..2320f428538 100644
--- a/documentation/src/main/resources/pages/ditto/installation-operating.md
+++ b/documentation/src/main/resources/pages/ditto/installation-operating.md
@@ -264,10 +264,11 @@ old events that are no longer needed for the event sourcing.
 Encryption is done using a 256-bit AES symmetrical key and the AES/GCM/NoPadding transformation.
 
 #### Symmetric key
+To generate it you can run in terminal:
 
-To generate it you can use a convenience method already available
-at [EncryptorAesGcm.generateAESKeyAsString()](https://github.com/eclipse-ditto/ditto/blob/master/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/util/EncryptorAesGcm.java#L100)
-
+```shell
+$ openssl rand 32 | basenc --base64url
+```
 or you can use the java standard library
 
 ```java
@@ -275,12 +276,8 @@ or you can use the java standard library
 keyGen.init(256);
 javax.crypto.SecretKey aes256SymetricKey = keyGen.generateKey();
 ```
-
-or with a terminal command.
- -```shell -$ openssl rand 32 | basenc --base64url -``` +or use a convenience method already available +at [EncryptorAesGcm.generateAESKeyAsString()](https://github.com/eclipse-ditto/ditto/blob/master/connectivity/service/src/main/java/org/eclipse/ditto/connectivity/service/util/EncryptorAesGcm.java#L100) The key must be **256-bit [Base64-encoded with url-safe alphabet](https://www.rfc-editor.org/rfc/rfc4648#section-5) using the UTF-8** charset. This is done already by the convenience method mentioned @@ -314,7 +311,7 @@ the encrypted parts and save them. #### Encryption key rotation -Since Ditto 3.10.0, it is possible to rotate encryption keys without downtime or data loss using a dual-key configuration +Since Ditto 3.9.0, it is possible to rotate encryption keys without downtime or data loss using a dual-key configuration and a migration command. ##### Dual-key configuration @@ -460,7 +457,7 @@ To rotate an encryption key: - Progress is persisted to allow resuming after abort or service restart - Migration runs in batches to avoid overwhelming the database - The batch size can be configured via `ditto.connectivity.connection.encryption.migration.batch-size` -- Migration is throttled to prevent database overload (default: 100 documents/minute) +- Migration is throttled to prevent database overload (default: 200 documents/minute) - Throttling rate can be configured via `ditto.connectivity.connection.encryption.migration.max-documents-per-minute` - Set throttling to 0 to disable (not recommended for production) @@ -661,7 +658,10 @@ ditto { ] } ] - //... + //... 
+ } + } + } } ``` @@ -707,6 +707,7 @@ ditto { } ] } +} ``` There is a new implementation of the caching signal enrichment facade provider that must be configured to enable this From e495d12df43492475d6c3669e64c43434c4140ad Mon Sep 17 00:00:00 2001 From: Aleksandar Stanchev Date: Fri, 27 Feb 2026 09:31:27 +0200 Subject: [PATCH 5/5] chart version Signed-off-by: Aleksandar Stanchev --- deployment/helm/ditto/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/helm/ditto/Chart.yaml b/deployment/helm/ditto/Chart.yaml index 1bd317bbac9..d4092a96955 100644 --- a/deployment/helm/ditto/Chart.yaml +++ b/deployment/helm/ditto/Chart.yaml @@ -16,7 +16,7 @@ description: | A digital twin is a virtual, cloud based, representation of his real world counterpart (real world “Things”, e.g. devices like sensors, smart heating, connected cars, smart grids, EV charging stations etc). type: application -version: 3.8.14 # chart version is effectively set by release-job +version: 3.8.15 # chart version is effectively set by release-job appVersion: 3.8.12 keywords: - iot-chart